    Searched full:movdqa (Results 301 - 325 of 363)

  /external/chromium_org/third_party/libjpeg_turbo/simd/
jiss2flt-64.asm 442 movdqa xmm2,[rel PB_CENTERJSAMP] ; xmm2=[rel PB_CENTERJSAMP]
449 movdqa xmm4,xmm6 ; transpose coefficients(phase 2)
453 movdqa xmm7,xmm6 ; transpose coefficients(phase 3)
jiss2flt.asm 450 movdqa xmm2,[GOTOFF(ebx,PB_CENTERJSAMP)] ; xmm2=[PB_CENTERJSAMP]
457 movdqa xmm4,xmm6 ; transpose coefficients(phase 2)
461 movdqa xmm7,xmm6 ; transpose coefficients(phase 3)
  /external/openssl/crypto/bn/asm/
x86_64-mont.pl 653 movdqa %xmm0,(%rsp)
660 movdqa %xmm0,16(%rsp,$i)
662 movdqa %xmm0,32(%rsp,$i)
670 movdqa %xmm0,16(%rsp,$i)
1464 movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
1465 movdqa %xmm0,($nptr) # zap upper half of temporary vector
1472 movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
1473 movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
1474 movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
1475 movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
    [all...]
x86_64-mont5.pl 788 movdqa %xmm0,(%rsp)
795 movdqa %xmm0,16(%rsp,$i)
797 movdqa %xmm0,32(%rsp,$i)
805 movdqa %xmm0,16(%rsp,$i)
868 .byte 0x0f,0x29,0x7c,0x24,0x10 #movdqa %xmm7,0x10(%rsp)
  /external/chromium_org/third_party/boringssl/src/crypto/bn/asm/
x86_64-mont5.pl     [all...]
rsaz-x86_64.pl 1404 movdqa %xmm0, (%rsp)
1405 movdqa %xmm0, 16(%rsp)
1406 movdqa %xmm0, 32(%rsp)
1407 movdqa %xmm0, 48(%rsp)
1408 movdqa %xmm0, 64(%rsp)
1409 movdqa %xmm0, 80(%rsp)
1410 movdqa %xmm0, 96(%rsp)
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/third_party/libyuv/source/
x86inc.asm 351 movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
361 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
666 %define mova movdqa
874 %define %%regmov movdqa
  /external/chromium_org/third_party/libvpx/source/libvpx/third_party/x86inc/
x86inc.asm 456 movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
466 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
768 %define mova movdqa
971 %define %%regmov movdqa
  /external/chromium_org/third_party/libyuv/source/
x86inc.asm 351 movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
361 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
666 %define mova movdqa
874 %define %%regmov movdqa
  /external/chromium_org/third_party/x86inc/
x86inc.asm 329 movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
339 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
590 %define mova movdqa
764 %define %%regmov movdqa
  /external/libvpx/libvpx/third_party/x86inc/
x86inc.asm 439 movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
449 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
747 %define mova movdqa
950 %define %%regmov movdqa
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/third_party/x86inc/
x86inc.asm 439 movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
449 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
747 %define mova movdqa
950 %define %%regmov movdqa
  /bionic/libc/arch-x86/atom/string/
ssse3-strcpy-atom.S     [all...]
  /external/llvm/test/MC/Disassembler/X86/
x86-32.txt 301 # CHECK: movdqa %xmm1, %xmm0
304 # CHECK: movdqa %xmm0, %xmm1
simple-tests.txt 275 # CHECK: movdqa %xmm1, %xmm0
278 # CHECK: movdqa %xmm0, %xmm1
  /external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/
avx.asm 822 movdqa xmm1, xmm2
823 movdqa xmm1, [rax]
824 movdqa xmm1, dqword [rax]
825 movdqa [rax], xmm2
826 movdqa dqword [rax], xmm2
    [all...]
  /external/chromium_org/v8/src/ia32/
assembler-ia32.h     [all...]
  /external/libyuv/files/source/
convert.cc 81 movdqa xmm0, [eax]
84 movdqa [eax + edi], xmm0
100 "movdqa (%0),%%xmm0 \n"
103 "movdqa %%xmm0,(%0,%1) \n"
    [all...]
  /external/llvm/lib/Target/X86/
README-SSE.txt 39 movdqa %xmm2, %xmm0
53 movdqa %xmm0, %xmm2
662 movdqa LC0, %xmm0
679 movdqa .LC0(%rip), %xmm0
  /external/valgrind/main/memcheck/tests/amd64/
sse_memory.c 300 TEST_INSN( &AllMask, 16,movdqa)
  /external/pixman/pixman/
pixman.c 553 * When using -msse, gcc generates movdqa instructions assuming that
556 * causes the movdqa instructions to fail.
    [all...]
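The pixman comment above turns on movdqa's alignment rule: its 128-bit memory operand must be 16-byte aligned, and gcc emits movdqa for stack slots because the ABI guarantees that alignment, so a caller that breaks the ABI makes the instruction fault. A minimal 64-bit NASM sketch (hypothetical, not taken from any file listed on this page) contrasting it with the unaligned form movdqu:

    section .bss
    align 16
    buf:    resb 32                      ; 16-byte aligned scratch buffer

    section .text
    global movdqa_alignment_demo
    movdqa_alignment_demo:
        movdqa  xmm0, [rel buf]          ; fine: address is 16-byte aligned
        movdqu  xmm1, [rel buf + 4]      ; fine: movdqu accepts any alignment
        ; movdqa xmm1, [rel buf + 4]     ; would fault (#GP): address % 16 != 0
        ret

The same requirement is what the platform-posix.cc hit further down refers to: memory handed to SSE2 code has to be 16-byte aligned before movdqa can touch it.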
  /external/valgrind/main/none/tests/amd64/
insn_sse2.def 99 movdqa xmm.uq[0x012345678abcdef,0xfedcba9876543210] xmm.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
100 movdqa m128.uq[0x012345678abcdef,0xfedcba9876543210] xmm.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
101 movdqa xmm.uq[0x012345678abcdef,0xfedcba9876543210] m128.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
    [all...]
  /external/valgrind/main/none/tests/x86/
insn_sse2.def 99 movdqa xmm.uq[0x012345678abcdef,0xfedcba9876543210] xmm.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
100 movdqa m128.uq[0x012345678abcdef,0xfedcba9876543210] xmm.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
101 movdqa xmm.uq[0x012345678abcdef,0xfedcba9876543210] m128.uq[0x1212121234343434,0x5656565678787878] => 1.uq[0x012345678abcdef,0xfedcba9876543210]
    [all...]
  /external/llvm/test/CodeGen/X86/
vector-shuffle-128-v8.ll 374 ; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
384 ; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
  /external/chromium_org/v8/src/base/platform/
platform-posix.cc 86 // that requires 16 byte alignment such as movdqa on x86.
