/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/x86/ |
sad_sse2.asm | 356 movdqu xmm0, XMMWORD PTR [rsi] 357 movdqu xmm1, XMMWORD PTR [rsi + 16] 358 movdqu xmm2, XMMWORD PTR [rsi + rax] 359 movdqu xmm3, XMMWORD PTR [rsi + rax + 16] 363 movdqu xmm4, XMMWORD PTR [rsi] 364 movdqu xmm5, XMMWORD PTR [rsi + 16] 365 movdqu xmm6, XMMWORD PTR [rsi + rax] 366 movdqu xmm7, XMMWORD PTR [rsi + rax + 16] 392 movdqu xmm0, XMMWORD PTR [rsi] 393 movdqu xmm1, XMMWORD PTR [rsi + 16 [all...] |
postproc_sse2.asm | 124 movdqu xmm0, XMMWORD PTR [rsi] 125 movdqu xmm1, XMMWORD PTR [rsi + rax] 126 movdqu xmm3, XMMWORD PTR [rsi + 2*rax] 132 movdqu xmm1, XMMWORD PTR [rsi + 2*rax] 133 movdqu xmm3, XMMWORD PTR [rsi + rax] 137 movdqu XMMWORD PTR [rdi], xmm0 178 movdqu xmm0, XMMWORD PTR [rdi + rdx] 179 movdqu xmm1, XMMWORD PTR [rdi + rdx -2] 180 movdqu xmm3, XMMWORD PTR [rdi + rdx -1] 184 movdqu xmm1, XMMWORD PTR [rdi + rdx +1 [all...] |
variance_impl_ssse3.asm | 67 movdqu xmm0, XMMWORD PTR [rsi] 68 movdqu xmm1, XMMWORD PTR [rsi+1] 92 movdqu xmm1, XMMWORD PTR [rsi] 93 movdqu xmm2, XMMWORD PTR [rsi+1] 163 movdqu xmm1, XMMWORD PTR [rsi] 173 movdqu xmm3, XMMWORD PTR [rsi] 264 movdqu xmm1, XMMWORD PTR [rsi] 265 movdqu xmm2, XMMWORD PTR [rsi+1]
|
/external/libvpx/libvpx/vp8/common/x86/ |
sad_sse2.asm | 356 movdqu xmm0, XMMWORD PTR [rsi] 357 movdqu xmm1, XMMWORD PTR [rsi + 16] 358 movdqu xmm2, XMMWORD PTR [rsi + rax] 359 movdqu xmm3, XMMWORD PTR [rsi + rax + 16] 363 movdqu xmm4, XMMWORD PTR [rsi] 364 movdqu xmm5, XMMWORD PTR [rsi + 16] 365 movdqu xmm6, XMMWORD PTR [rsi + rax] 366 movdqu xmm7, XMMWORD PTR [rsi + rax + 16] 392 movdqu xmm0, XMMWORD PTR [rsi] 393 movdqu xmm1, XMMWORD PTR [rsi + 16 [all...] |
postproc_sse2.asm | 124 movdqu xmm0, XMMWORD PTR [rsi] 125 movdqu xmm1, XMMWORD PTR [rsi + rax] 126 movdqu xmm3, XMMWORD PTR [rsi + 2*rax] 132 movdqu xmm1, XMMWORD PTR [rsi + 2*rax] 133 movdqu xmm3, XMMWORD PTR [rsi + rax] 137 movdqu XMMWORD PTR [rdi], xmm0 178 movdqu xmm0, XMMWORD PTR [rdi + rdx] 179 movdqu xmm1, XMMWORD PTR [rdi + rdx -2] 180 movdqu xmm3, XMMWORD PTR [rdi + rdx -1] 184 movdqu xmm1, XMMWORD PTR [rdi + rdx +1 [all...] |
variance_impl_ssse3.asm | 67 movdqu xmm0, XMMWORD PTR [rsi] 68 movdqu xmm1, XMMWORD PTR [rsi+1] 92 movdqu xmm1, XMMWORD PTR [rsi] 93 movdqu xmm2, XMMWORD PTR [rsi+1] 163 movdqu xmm1, XMMWORD PTR [rsi] 173 movdqu xmm3, XMMWORD PTR [rsi] 264 movdqu xmm1, XMMWORD PTR [rsi] 265 movdqu xmm2, XMMWORD PTR [rsi+1]
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/ |
sad_sse2.asm | 356 movdqu xmm0, XMMWORD PTR [rsi] 357 movdqu xmm1, XMMWORD PTR [rsi + 16] 358 movdqu xmm2, XMMWORD PTR [rsi + rax] 359 movdqu xmm3, XMMWORD PTR [rsi + rax + 16] 363 movdqu xmm4, XMMWORD PTR [rsi] 364 movdqu xmm5, XMMWORD PTR [rsi + 16] 365 movdqu xmm6, XMMWORD PTR [rsi + rax] 366 movdqu xmm7, XMMWORD PTR [rsi + rax + 16] 392 movdqu xmm0, XMMWORD PTR [rsi] 393 movdqu xmm1, XMMWORD PTR [rsi + 16 [all...] |
postproc_sse2.asm | 124 movdqu xmm0, XMMWORD PTR [rsi] 125 movdqu xmm1, XMMWORD PTR [rsi + rax] 126 movdqu xmm3, XMMWORD PTR [rsi + 2*rax] 132 movdqu xmm1, XMMWORD PTR [rsi + 2*rax] 133 movdqu xmm3, XMMWORD PTR [rsi + rax] 137 movdqu XMMWORD PTR [rdi], xmm0 178 movdqu xmm0, XMMWORD PTR [rdi + rdx] 179 movdqu xmm1, XMMWORD PTR [rdi + rdx -2] 180 movdqu xmm3, XMMWORD PTR [rdi + rdx -1] 184 movdqu xmm1, XMMWORD PTR [rdi + rdx +1 [all...] |
variance_impl_ssse3.asm | 67 movdqu xmm0, XMMWORD PTR [rsi] 68 movdqu xmm1, XMMWORD PTR [rsi+1] 92 movdqu xmm1, XMMWORD PTR [rsi] 93 movdqu xmm2, XMMWORD PTR [rsi+1] 163 movdqu xmm1, XMMWORD PTR [rsi] 173 movdqu xmm3, XMMWORD PTR [rsi] 264 movdqu xmm1, XMMWORD PTR [rsi] 265 movdqu xmm2, XMMWORD PTR [rsi+1]
|
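All three trees above vendor the same vp8 SSE2/SSSE3 files, and the hits share one pattern: the source block being measured sits at an arbitrary byte offset, so each row is fetched with one or two unaligned movdqu loads before psadbw (for SAD) or the filter arithmetic runs. A minimal intrinsics sketch of the SAD case, with a hypothetical function name and a fixed 16x16 block rather than libvpx's actual entry points, could look like:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Hypothetical 16x16 SAD: unaligned loads from both buffers (movdqu),
     * psadbw accumulation into two 64-bit partial sums, then a fold. */
    static unsigned Sad16x16_SSE2(const uint8_t* src, int src_stride,
                                  const uint8_t* ref, int ref_stride) {
      __m128i acc = _mm_setzero_si128();
      for (int row = 0; row < 16; ++row) {
        __m128i s = _mm_loadu_si128((const __m128i*)(src + row * src_stride));
        __m128i r = _mm_loadu_si128((const __m128i*)(ref + row * ref_stride));
        acc = _mm_add_epi64(acc, _mm_sad_epu8(s, r));
      }
      acc = _mm_add_epi64(acc, _mm_srli_si128(acc, 8));  /* fold high lane into low */
      return (unsigned)_mm_cvtsi128_si32(acc);
    }

The real sad_sse2.asm routines unroll two rows per iteration (the [rsi] / [rsi + rax] pairs above) and keep the stride in rax; the intrinsics version leaves that scheduling to the compiler.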
/external/chromium_org/third_party/libyuv/source/ |
row_posix.cc | 286 "movdqu %%xmm0," MEMACCESS(1) " \n" 287 "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n" 311 "movdqu " MEMACCESS(0) ",%%xmm0 \n" 312 "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n" 313 "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n" 352 "movdqu " MEMACCESS(0) ",%%xmm0 \n" 353 "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n" 354 "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n" 405 "movdqu " MEMACCESS(0) ",%%xmm0 \n" 458 "movdqu " MEMACCESS(0) ",%%xmm0 \n [all...] |
row_win.cc | 195 movdqu [edx], xmm0 196 movdqu [edx + 16], xmm1 216 movdqu xmm0, [eax] 217 movdqu xmm1, [eax + 16] 218 movdqu xmm3, [eax + 32] 256 movdqu xmm0, [eax] 257 movdqu xmm1, [eax + 16] 258 movdqu xmm3, [eax + 32] 316 movdqu xmm0, [eax] // fetch 8 pixels of bgr565 366 movdqu xmm0, [eax] // fetch 8 pixels of 155 459 movdqu [edx], xmm0 // store 0 464 movdqu [edx + 16], xmm1 // store 1 465 movdqu [edx + 32], xmm2 // store 2 498 movdqu [edx], xmm0 // store 0 503 movdqu [edx + 16], xmm1 // store 1 504 movdqu [edx + 32], xmm2 // store 2 803 movdqu [edx], xmm0 838 movdqu [edx], xmm0 906 movdqu [edx], xmm0 974 movdqu [edx], xmm0 1042 movdqu [edx], xmm0 1480 movdqu [edx], xmm0 1497 movdqu [edx + edi], xmm0 2356 movdqu [edx + 8], xmm1 // Last 16 bytes. = 24 bytes, 8 RGB pixels. 2403 movdqu [edx + 8], xmm1 // Last 16 bytes. = 24 bytes, 8 RGB pixels. 2477 movdqu [edx], xmm0 // store 8 pixels of RGB565 2683 movdqu [edx], xmm0 2684 movdqu [edx + 16], xmm1 2726 movdqu [edx], xmm0 2727 movdqu [edx + 16], xmm1 2771 movdqu [edx], xmm0 2772 movdqu [edx + 16], xmm1 2811 movdqu [edx], xmm0 2812 movdqu [edx + 16], xmm1 2849 movdqu [edx], xmm0 2850 movdqu [edx + 16], xmm1 2930 movdqu [edx], xmm5 2931 movdqu [edx + 16], xmm0 3012 movdqu [edx], xmm2 3013 movdqu [edx + 16], xmm1 3094 movdqu [edx], xmm5 3095 movdqu [edx + 16], xmm0 3237 movdqu [edx], xmm0 3401 movdqu [edx], xmm0 3402 movdqu [edx + edi], xmm2 3502 movdqu [edi], xmm0 3503 movdqu [edi + 16], xmm2 4145 movdqu [edx], xmm0 4355 movdqu [edx], xmm0 4809 movdqu [edx], xmm0 4902 movdqu [edx], xmm0 5314 movdqu [edx], xmm0 5348 movdqu [edx], xmm0 5394 movdqu [edx], xmm0 5823 movdqu [edi], xmm0 5874 movdqu [edi], xmm0 5982 movdqu [edx], xmm2 6446 movdqu [esi + edi], xmm0 6459 movdqu [esi + edi], xmm0 6471 movdqu [esi + edi], xmm0 6484 movdqu [esi + edi], xmm0 6494 movdqu [esi + edi], xmm0 6558 movdqu [esi + edi], xmm0 6571 movdqu [esi + edi], xmm0 6583 movdqu [esi + edi], xmm0 6596 movdqu [esi + edi], xmm0 6606 movdqu [esi + edi], xmm0 6772 movdqu [edx], xmm0 6773 movdqu [edx + 16], xmm1 6865 movdqu [edx], xmm0 6883 movdqu [edx], xmm0 6901 movdqu [edx], xmm0 6919 movdqu [edx], xmm0 6962 movdqu [edi], xmm0 6963 movdqu [edi + 16], xmm1 7000 movdqu [edi], xmm1 7001 movdqu [edi + 16], xmm2 [all...] |
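row_posix.cc and row_win.cc are the same libyuv row kernels written twice, once as GCC inline assembly behind the MEMACCESS() addressing macros and once as MSVC __asm blocks; both fall back to movdqu whenever a row pointer or stride is not guaranteed to be 16-byte aligned. The simplest of these kernels is a straight row copy: paired movdqu loads and stores, 32 bytes per iteration. A hedged intrinsics restatement (hypothetical name, width assumed to be a multiple of 32) is:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical unaligned row copy, 32 bytes per iteration, mirroring the
     * movdqu [edx] / [edx + 16] store pairs visible in the matches above. */
    static void CopyRow_Unaligned_SSE2(const uint8_t* src, uint8_t* dst, int width) {
      for (int x = 0; x < width; x += 32) {
        __m128i lo = _mm_loadu_si128((const __m128i*)(src + x));
        __m128i hi = _mm_loadu_si128((const __m128i*)(src + x + 16));
        _mm_storeu_si128((__m128i*)(dst + x), lo);
        _mm_storeu_si128((__m128i*)(dst + x + 16), hi);
      }
    }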
/external/chromium_org/third_party/libyuv/util/ |
psnr.cc | 96 movdqu xmm1, [eax] 97 movdqu xmm2, [eax + edx] 99 movdqu xmm3, xmm1 103 movdqu xmm2, xmm1 132 "movdqu (%0),%%xmm1 \n" 133 "movdqu (%0,%1,1),%%xmm2 \n" 135 "movdqu %%xmm1,%%xmm3 \n" 139 "movdqu %%xmm1,%%xmm2 \n"
|
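The psnr.cc hits come from the sum-of-squared-error kernel behind the PSNR calculation: two unaligned rows come in through movdqu, the byte differences are widened to 16 bits, and pmaddwd squares and accumulates them. A hedged intrinsics sketch of the same computation (not the file's exact instruction sequence; hypothetical name; count assumed to be a multiple of 16 and small enough that the 32-bit lanes cannot overflow):

    #include <emmintrin.h>
    #include <stdint.h>

    static uint32_t SumSquareError_SSE2(const uint8_t* a, const uint8_t* b, int count) {
      const __m128i zero = _mm_setzero_si128();
      __m128i acc = _mm_setzero_si128();
      for (int i = 0; i < count; i += 16) {
        __m128i va = _mm_loadu_si128((const __m128i*)(a + i));  /* movdqu */
        __m128i vb = _mm_loadu_si128((const __m128i*)(b + i));  /* movdqu */
        /* Widen to 16 bits, subtract, then square-accumulate with pmaddwd. */
        __m128i d_lo = _mm_sub_epi16(_mm_unpacklo_epi8(va, zero), _mm_unpacklo_epi8(vb, zero));
        __m128i d_hi = _mm_sub_epi16(_mm_unpackhi_epi8(va, zero), _mm_unpackhi_epi8(vb, zero));
        acc = _mm_add_epi32(acc, _mm_madd_epi16(d_lo, d_lo));
        acc = _mm_add_epi32(acc, _mm_madd_epi16(d_hi, d_hi));
      }
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));  /* horizontal add of 4 lanes */
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
      return (uint32_t)_mm_cvtsi128_si32(acc);
    }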
/external/libvpx/libvpx/vp9/encoder/x86/ |
vp9_subpel_variance_impl_sse2.asm | 45 movdqu xmm5, XMMWORD PTR [rsi] 46 movdqu xmm3, XMMWORD PTR [rsi+1] 52 movdqu xmm1, XMMWORD PTR [rsi] ; 53 movdqu xmm2, XMMWORD PTR [rsi+1] ; 158 movdqu xmm5, XMMWORD PTR [rsi] 163 movdqu xmm3, XMMWORD PTR [rsi] 268 movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15 269 movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/ |
vp9_subpel_variance_impl_sse2.asm | 45 movdqu xmm5, XMMWORD PTR [rsi] 46 movdqu xmm3, XMMWORD PTR [rsi+1] 52 movdqu xmm1, XMMWORD PTR [rsi] ; 53 movdqu xmm2, XMMWORD PTR [rsi+1] ; 158 movdqu xmm5, XMMWORD PTR [rsi] 163 movdqu xmm3, XMMWORD PTR [rsi] 268 movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15 269 movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
|
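Both copies of vp9_subpel_variance_impl_sse2.asm (and the vp8 variance_impl_ssse3.asm hits earlier) load two overlapping unaligned vectors, [rsi] giving s0..s15 and [rsi+1] giving s1..s16, which is the setup for horizontal sub-pixel interpolation. At the exact half-pel offset the bilinear weights are equal, so the filter collapses to a single pavgb; a hedged intrinsics sketch of that one step (hypothetical name; the real routines apply general bilinear weights with pmullw/paddw, not just the average) is:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical horizontal half-pel filter for one 16-pixel row:
     * average of src[i] and src[i + 1], mirroring the [rsi]/[rsi+1] load pair. */
    static __m128i HalfPelRow_SSE2(const uint8_t* src) {
      __m128i s0 = _mm_loadu_si128((const __m128i*)src);        /* s0..s15 */
      __m128i s1 = _mm_loadu_si128((const __m128i*)(src + 1));  /* s1..s16 */
      return _mm_avg_epu8(s0, s1);  /* pavgb: (a + b + 1) >> 1 */
    }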
/external/chromium_org/third_party/openssl/openssl/crypto/aes/asm/ |
aesni-x86-mac.S | 305 movdqu (%esi),%xmm2 306 movdqu 16(%esi),%xmm3 307 movdqu 32(%esi),%xmm4 308 movdqu 48(%esi),%xmm5 309 movdqu 64(%esi),%xmm6 310 movdqu 80(%esi),%xmm7 317 movdqu (%esi),%xmm2 319 movdqu 16(%esi),%xmm3 321 movdqu 32(%esi),%xmm4 323 movdqu 48(%esi),%xmm [all...] |
vpaes-x86-mac.S | 73 movdqu (%edx),%xmm5 128 movdqu (%edx),%xmm5 150 movdqu (%edx),%xmm5 216 movdqu (%edx),%xmm0 230 movdqu (%esi),%xmm0 239 movdqu %xmm0,(%edx) 244 movdqu %xmm3,(%edx) 260 movdqu 8(%esi),%xmm0 280 movdqu 16(%esi),%xmm0 309 movdqu %xmm0,(%edx [all...] |
vpaes-x86.S | 76 movdqu (%edx),%xmm5 131 movdqu (%edx),%xmm5 155 movdqu (%edx),%xmm5 221 movdqu (%edx),%xmm0 237 movdqu (%esi),%xmm0 246 movdqu %xmm0,(%edx) 251 movdqu %xmm3,(%edx) 267 movdqu 8(%esi),%xmm0 287 movdqu 16(%esi),%xmm0 316 movdqu %xmm0,(%edx [all...] |
vpaes-x86_64.S | 27 movdqu (%r9),%xmm5 86 movdqu (%r9),%xmm5 118 movdqu (%r9),%xmm5 195 movdqu (%r9),%xmm0 225 movdqu (%rdi),%xmm0 238 movdqu %xmm0,(%rdx) 245 movdqu %xmm3,(%rdx) 289 movdqu 8(%rdi),%xmm0 321 movdqu 16(%rdi),%xmm0 372 movdqu %xmm0,(%rdx [all...] |
aesni-x86_64.S | 456 movdqu (%rdi),%xmm2 457 movdqu 16(%rdi),%xmm3 458 movdqu 32(%rdi),%xmm4 459 movdqu 48(%rdi),%xmm5 460 movdqu 64(%rdi),%xmm6 461 movdqu 80(%rdi),%xmm7 462 movdqu 96(%rdi),%xmm8 463 movdqu 112(%rdi),%xmm9 471 movdqu (%rdi),%xmm2 474 movdqu 16(%rdi),%xmm [all...] |
vpaes-x86.pl | 186 &movdqu ("xmm5",&QWP(0,$key)); 245 &movdqu ("xmm5",&QWP(0,$key)); 275 &movdqu ("xmm5",&QWP(0,$key)); 350 &movdqu ("xmm0",&QWP(0,$key)); 372 &movdqu ("xmm0",&QWP(0,$inp)); # load key (unaligned) 386 &movdqu (&QWP(0,$key),"xmm0"); 393 &movdqu (&QWP(0,$key),"xmm3"); 436 &movdqu ("xmm0",&QWP(8,$inp)); # load key part 2 (very unaligned) 467 &movdqu ("xmm0",&QWP(16,$inp)); # load key part 2 (unaligned) 516 &movdqu (&QWP(0,$key),"xmm0"); # save last ke [all...] |
/external/openssl/crypto/aes/asm/ |
vpaes-x86.S | 76 movdqu (%edx),%xmm5 131 movdqu (%edx),%xmm5 155 movdqu (%edx),%xmm5 221 movdqu (%edx),%xmm0 237 movdqu (%esi),%xmm0 246 movdqu %xmm0,(%edx) 251 movdqu %xmm3,(%edx) 267 movdqu 8(%esi),%xmm0 287 movdqu 16(%esi),%xmm0 316 movdqu %xmm0,(%edx [all...] |
vpaes-x86_64.S | 27 movdqu (%r9),%xmm5 86 movdqu (%r9),%xmm5 118 movdqu (%r9),%xmm5 195 movdqu (%r9),%xmm0 225 movdqu (%rdi),%xmm0 238 movdqu %xmm0,(%rdx) 245 movdqu %xmm3,(%rdx) 289 movdqu 8(%rdi),%xmm0 321 movdqu 16(%rdi),%xmm0 372 movdqu %xmm0,(%rdx [all...] |
aesni-x86_64.S | 456 movdqu (%rdi),%xmm2 457 movdqu 16(%rdi),%xmm3 458 movdqu 32(%rdi),%xmm4 459 movdqu 48(%rdi),%xmm5 460 movdqu 64(%rdi),%xmm6 461 movdqu 80(%rdi),%xmm7 462 movdqu 96(%rdi),%xmm8 463 movdqu 112(%rdi),%xmm9 471 movdqu (%rdi),%xmm2 474 movdqu 16(%rdi),%xmm [all...] |
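In the OpenSSL files, movdqu covers input, output, and key material that the caller never promises to align: aesni-x86_64.S loads up to eight plaintext blocks at a time (xmm2..xmm9 above) before running the aesenc rounds in parallel, and the vpaes-x86* files load the raw key bytes unaligned during key-schedule setup. A single-block AES-128 sketch with the AES-NI intrinsics shows the shape of the data path (hypothetical function; assumes an already expanded 11-entry round-key schedule and an AES-NI-enabled build, e.g. -maes; the .S files interleave several blocks for throughput):

    #include <emmintrin.h>
    #include <wmmintrin.h>  /* AES-NI intrinsics */
    #include <stdint.h>

    static void AesEncryptBlock128(const uint8_t in[16], uint8_t out[16],
                                   const __m128i round_keys[11]) {
      __m128i block = _mm_loadu_si128((const __m128i*)in);  /* movdqu: input may be unaligned */
      block = _mm_xor_si128(block, round_keys[0]);          /* initial AddRoundKey */
      for (int r = 1; r < 10; ++r) {
        block = _mm_aesenc_si128(block, round_keys[r]);     /* rounds 1..9 */
      }
      block = _mm_aesenclast_si128(block, round_keys[10]);  /* final round */
      _mm_storeu_si128((__m128i*)out, block);               /* unaligned store */
    }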
/external/libyuv/files/source/ |
row_win.cc | 249 movdqu xmm0, [eax] 250 movdqu xmm1, [eax + 16] 251 movdqu xmm3, [eax + 32] 289 movdqu xmm0, [eax] 290 movdqu xmm1, [eax + 16] 291 movdqu xmm3, [eax + 32] 349 movdqu xmm0, [eax] // fetch 8 pixels of bgr565 399 movdqu xmm0, [eax] // fetch 8 pixels of 1555 445 movdqu xmm0, [eax] // fetch 8 pixels of bgra4444 702 movdqu xmm0, [eax 718 movdqu [edx], xmm0 786 movdqu [edx], xmm0 854 movdqu [edx], xmm0 922 movdqu [edx], xmm0 1851 movdqu [edx], xmm0 1852 movdqu [edx + 16], xmm1 1894 movdqu [edx], xmm0 1895 movdqu [edx + 16], xmm1 1938 movdqu [edx], xmm0 1939 movdqu [edx + 16], xmm1 1978 movdqu [edx], xmm0 1979 movdqu [edx + 16], xmm1 2016 movdqu [edx], xmm0 2017 movdqu [edx + 16], xmm1 2097 movdqu [edx], xmm5 2098 movdqu [edx + 16], xmm0 2179 movdqu [edx], xmm2 2180 movdqu [edx + 16], xmm1 2261 movdqu [edx], xmm5 2262 movdqu [edx + 16], xmm0 2372 movdqu [edx], xmm0 2700 movdqu [edx], xmm0 2910 movdqu [edx], xmm0 3777 movdqu [edi], xmm0 3884 movdqu [edx], xmm2 [all...] |
/external/chromium_org/v8/src/ia32/ |
codegen-ia32.cc | 257 __ movdqu(xmm0, Operand(src, 0)); 258 __ movdqu(Operand(dst, 0), xmm0); 279 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 280 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 325 __ movdqu(xmm0, Operand(src, -0x10)); 326 __ movdqu(Operand(dst, -0x10), xmm0); 347 __ movdqu(xmm0, Operand(src, 0)); 348 __ movdqu(Operand(dst, 0), xmm0); 397 __ movdqu(xmm0, Operand(src, 0)); 398 __ movdqu(xmm1, Operand(src, count, times_1, -0x10)) [all...] |
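Here the movdqu calls are MacroAssembler emissions: codegen-ia32.cc generates V8's memory-copy stubs, and the emitted code moves 16 unaligned bytes per movdqu load/store pair, using negative offsets such as Operand(src, count, times_1, -0x10) to handle the tail with one overlapping chunk instead of a scalar cleanup loop. Roughly, the generated forward-copy path behaves like this hedged C++ restatement (not V8's actual implementation; assumes non-overlapping buffers and size >= 16):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    static void CopyBytesUnaligned(uint8_t* dst, const uint8_t* src, size_t size) {
      size_t i = 0;
      for (; i + 16 <= size; i += 16) {
        __m128i v = _mm_loadu_si128((const __m128i*)(src + i));
        _mm_storeu_si128((__m128i*)(dst + i), v);
      }
      if (i < size) {
        /* Overlapping final 16-byte chunk, like the -0x10 tail loads above. */
        __m128i v = _mm_loadu_si128((const __m128i*)(src + size - 16));
        _mm_storeu_si128((__m128i*)(dst + size - 16), v);
      }
    }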