HomeSort by relevance Sort by last modified time
    Searched full:movdqu (Results 76 - 100 of 186) sorted by null

1 2 3 4 5 6 7 8

  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
subpixel_ssse3.asm 322 movdqu xmm0, XMMWORD PTR [rsi - 2]
374 movdqu xmm1, XMMWORD PTR [rsi - 2]
    [all...]
  /external/boringssl/src/crypto/modes/asm/
ghash-x86_64.pl 500 movdqu ($Xip),$Hkey
528 movdqu $Hkey,0x00($Htbl) # save H
530 movdqu $Xi,0x10($Htbl) # save H^2
532 movdqu $T2,0x20($Htbl) # save Karatsuba "salt"
546 movdqu $T3,0x30($Htbl) # save H^3
548 movdqu $Xi,0x40($Htbl) # save H^4
550 movdqu $T2,0x50($Htbl) # save Karatsuba "salt"
572 movdqu ($Xip),$Xi
574 movdqu ($Htbl),$Hkey
575 movdqu 0x20($Htbl),$T
    [all...]
  /external/libvpx/libvpx/vp8/encoder/x86/
ssim_opt_x86_64.asm 90 movdqu xmm5, [rsi]
91 movdqu xmm6, [rdi]
  /external/libvpx/libvpx/vp9/common/x86/
vp9_subpixel_8t_sse2.asm 566 movdqu xmm0, [rsi - 3] ;load src
641 movdqu xmm0, [rsi - 3] ;load src
716 movdqu xmm0, [rsi - 3] ;load src
736 movdqu xmm0, [rsi + 5] ;load src
801 movdqu xmm0, [rsi - 3] ;load src
867 movdqu xmm0, [rsi - 3] ;load src
933 movdqu xmm0, [rsi - 3] ;load src
953 movdqu xmm0, [rsi + 5] ;load src
vp9_postproc_sse2.asm 397 movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2]
400 movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
402 movdqu xmm4, [sym(vp9_rv) + rcx*2]
660 movdqu xmm1,[rsi+rax] ; get the source
666 movdqu xmm2,[rdi+rax] ; get the noise for this line
668 movdqu [rsi+rax],xmm1 ; store the result
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_ssim_opt_x86_64.asm 90 movdqu xmm5, [rsi]
91 movdqu xmm6, [rdi]
vp9_variance_impl_sse2.asm 128 movdqu xmm1, XMMWORD PTR [rsi]
129 movdqu xmm2, XMMWORD PTR [rdi]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
ssim_opt.asm 90 movdqu xmm5, [rsi]
91 movdqu xmm6, [rdi]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_subpixel_8t_sse2.asm 566 movdqu xmm0, [rsi - 3] ;load src
641 movdqu xmm0, [rsi - 3] ;load src
716 movdqu xmm0, [rsi - 3] ;load src
736 movdqu xmm0, [rsi + 5] ;load src
801 movdqu xmm0, [rsi - 3] ;load src
867 movdqu xmm0, [rsi - 3] ;load src
933 movdqu xmm0, [rsi - 3] ;load src
953 movdqu xmm0, [rsi + 5] ;load src
vp9_postproc_sse2.asm 397 movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2]
400 movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
402 movdqu xmm4, [sym(vp9_rv) + rcx*2]
661 movdqu xmm1,[rsi+rax] ; get the source
667 movdqu xmm2,[rdi+rax] ; get the noise for this line
669 movdqu [rsi+rax],xmm1 ; store the result
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_ssim_opt.asm 90 movdqu xmm5, [rsi]
91 movdqu xmm6, [rdi]
  /external/libvpx/libvpx/third_party/libyuv/source/
scale_posix.cc 208 "movdqu " MEMACCESS(0) ",%%xmm0 \n"
209 "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
214 "movdqu %%xmm0," MEMACCESS(1) " \n"
238 "movdqu " MEMACCESS(0) ",%%xmm0 \n"
239 "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
250 "movdqu %%xmm0," MEMACCESS(1) " \n"
274 "movdqu " MEMACCESS(0) ",%%xmm0 \n"
275 "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
276 MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm
    [all...]
  /external/boringssl/mac-x86_64/crypto/bn/
modexp512-x86_64.S 1492 movdqu 0(%rsi),%xmm0
1493 movdqu 16(%rsi),%xmm1
1494 movdqu 32(%rsi),%xmm2
1495 movdqu 48(%rsi),%xmm3
1506 movdqu 0(%rdx),%xmm0
1507 movdqu 16(%rdx),%xmm1
1508 movdqu 32(%rdx),%xmm2
1509 movdqu 48(%rdx),%xmm3
1699 movdqu 0(%rdx),%xmm0
1700 movdqu 16(%rdx),%xmm1
    [all...]
x86_64-mont.S 581 movdqu (%rsp,%r14,1),%xmm2
582 movdqu 16(%rsp,%r14,1),%xmm4
583 movdqu (%rdi,%r14,1),%xmm1
584 movdqu 16(%rdi,%r14,1),%xmm3
591 movdqu %xmm2,(%rdi,%r14,1)
592 movdqu %xmm4,16(%rdi,%r14,1)
  /external/boringssl/win-x86_64/crypto/bn/
modexp512-x86_64.asm 1501 movdqu xmm0,XMMWORD PTR[rsi]
1502 movdqu xmm1,XMMWORD PTR[16+rsi]
1503 movdqu xmm2,XMMWORD PTR[32+rsi]
1504 movdqu xmm3,XMMWORD PTR[48+rsi]
1515 movdqu xmm0,XMMWORD PTR[rdx]
1516 movdqu xmm1,XMMWORD PTR[16+rdx]
1517 movdqu xmm2,XMMWORD PTR[32+rdx]
1518 movdqu xmm3,XMMWORD PTR[48+rdx]
1708 movdqu xmm0,XMMWORD PTR[rdx]
1709 movdqu xmm1,XMMWORD PTR[16+rdx]
    [all...]
x86_64-mont.asm 610 movdqu xmm2,XMMWORD[r14*1+rsp]
611 movdqu xmm4,XMMWORD[16+r14*1+rsp]
612 movdqu xmm1,XMMWORD[r14*1+rdi]
613 movdqu xmm3,XMMWORD[16+r14*1+rdi]
620 movdqu XMMWORD[r14*1+rdi],xmm2
621 movdqu XMMWORD[16+r14*1+rdi],xmm4
  /external/libyuv/files/source/
scale.cc 288 movdqu xmm0, [eax]
289 movdqu xmm1, [eax + 16]
295 movdqu [edx], xmm0 local
319 movdqu xmm0, [eax]
320 movdqu xmm1, [eax + 16]
321 movdqu xmm2, [eax + esi]
322 movdqu xmm3, [eax + esi + 16]
338 movdqu [edx], xmm0 local
551 // Note that movdqa+palign may be better than movdqu.
600 // Note that movdqa+palign may be better than movdqu
    [all...]
  /external/boringssl/win-x86/crypto/sha/
sha512-586.asm 394 movdqu xmm0,[edi]
398 movdqu xmm1,[16+edi]
404 movdqu xmm2,[32+edi]
410 movdqu xmm3,[48+edi]
416 movdqu xmm4,[64+edi]
422 movdqu xmm5,[80+edi]
429 movdqu xmm6,[96+edi]
436 movdqu xmm7,[112+edi]
1488 movdqu xmm0,[ebx]
1492 movdqu xmm1,[16+ebx]
    [all...]
  /external/llvm/test/CodeGen/X86/
fast-isel-store.ll 21 ; CHECK: movdqu
unaligned-spill-folding.ll 37 ; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
  /external/boringssl/src/crypto/rc4/asm/
rc4-md5-x86_64.pl 221 $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
261 $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
301 $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15);
340 $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15);
388 #rc4# movdqu %xmm2,($out,$in0) # write RC4 output
389 #rc4# movdqu %xmm3,16($out,$in0)
390 #rc4# movdqu %xmm4,32($out,$in0)
391 #rc4# movdqu %xmm5,48($out,$in0)
  /bionic/libc/arch-x86/atom/string/
ssse3-wcscpy-atom.S 114 movdqu (%ecx), %xmm1
115 movdqu %xmm1, (%edx)
597 movdqu (%ecx), %xmm0
598 movdqu %xmm0, (%edx)
645 movdqu (%ecx), %xmm0
646 movdqu %xmm0, (%edx)
  /external/boringssl/linux-x86_64/crypto/bn/
x86_64-mont.S 582 movdqu (%rsp,%r14,1),%xmm2
583 movdqu 16(%rsp,%r14,1),%xmm4
584 movdqu (%rdi,%r14,1),%xmm1
585 movdqu 16(%rdi,%r14,1),%xmm3
592 movdqu %xmm2,(%rdi,%r14,1)
593 movdqu %xmm4,16(%rdi,%r14,1)
modexp512-x86_64.S 1492 movdqu 0(%rsi),%xmm0
1493 movdqu 16(%rsi),%xmm1
1494 movdqu 32(%rsi),%xmm2
1495 movdqu 48(%rsi),%xmm3
1506 movdqu 0(%rdx),%xmm0
1507 movdqu 16(%rdx),%xmm1
1508 movdqu 32(%rdx),%xmm2
1509 movdqu 48(%rdx),%xmm3
1699 movdqu 0(%rdx),%xmm0
1700 movdqu 16(%rdx),%xmm1
    [all...]
  /bionic/libc/arch-x86/silvermont/string/
sse2-memset-slm.S 207 movdqu %xmm0, (%edx)
208 movdqu %xmm0, -16(%edx, %ecx)
211 movdqu %xmm0, 16(%edx)
212 movdqu %xmm0, -32(%edx, %ecx)
221 movdqu %xmm0, (%edx)

Completed in 887 milliseconds

1 2 3 4 5 6 7 8