    Searched full:xmm12 (Results 26 - 50 of 87)


  /external/boringssl/win-x86_64/crypto/aes/
aesni-x86_64.asm 1142 movaps XMMWORD[(-72)+rax],xmm12
1274 movdqu xmm12,XMMWORD[64+rdi]
1286 pxor xmm12,xmm6
1294 movdqu XMMWORD[64+rsi],xmm12
1478 movdqu xmm12,XMMWORD[32+rdi]
1481 pxor xmm12,xmm0
1506 movdqa xmm12,XMMWORD[16+rsp]
1520 movdqa xmm3,xmm12
1564 movups xmm12,XMMWORD[32+rdi]
1575 pxor xmm4,xmm12
    [all...]
vpaes-x86_64.asm 48 movdqa xmm0,xmm12
483 movdqa xmm0,xmm12
632 movaps XMMWORD[112+rsp],xmm12
651 movaps xmm12,XMMWORD[112+rsp]
683 movaps XMMWORD[112+rsp],xmm12
707 movaps xmm12,XMMWORD[112+rsp]
739 movaps XMMWORD[112+rsp],xmm12
754 movaps xmm12,XMMWORD[112+rsp]
785 movaps XMMWORD[112+rsp],xmm12
800 movaps xmm12,XMMWORD[112+rsp]
    [all...]
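
  The movaps pairs above (save to XMMWORD[112+rsp] on entry, reload on exit) are the Win64-specific part of these files: the Windows x64 calling convention treats xmm6-xmm15 as non-volatile, so any routine that clobbers xmm12 has to spill and restore it. A minimal sketch of that pattern in the same NASM syntax (the label and the 0x88-byte frame are hypothetical; only the 112+rsp slot mirrors the lines above):

      ; hypothetical Win64 routine that preserves the non-volatile xmm12
      my_win64_routine:
              sub     rsp,0x88                        ; keeps rsp 16-byte aligned, as movaps requires
              movaps  XMMWORD[112+rsp],xmm12          ; save caller's xmm12
              pxor    xmm12,xmm12                     ; ...body is now free to clobber xmm12...
              movaps  xmm12,XMMWORD[112+rsp]          ; restore it before returning
              add     rsp,0x88
              ret
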
  /external/boringssl/linux-x86_64/crypto/aes/
aesni-x86_64.S 1166 movdqu 64(%rdi),%xmm12
1178 pxor %xmm6,%xmm12
1186 movdqu %xmm12,64(%rsi)
1370 movdqu 32(%rdi),%xmm12
1373 pxor %xmm0,%xmm12
1398 movdqa 16(%rsp),%xmm12
1412 movdqa %xmm12,%xmm3
1456 movups 32(%rdi),%xmm12
1467 pxor %xmm12,%xmm4
1482 movups 96(%rdi),%xmm12
    [all...]
  /external/boringssl/mac-x86_64/crypto/aes/
aesni-x86_64.S 1165 movdqu 64(%rdi),%xmm12
1177 pxor %xmm6,%xmm12
1185 movdqu %xmm12,64(%rsi)
1369 movdqu 32(%rdi),%xmm12
1372 pxor %xmm0,%xmm12
1397 movdqa 16(%rsp),%xmm12
1411 movdqa %xmm12,%xmm3
1455 movups 32(%rdi),%xmm12
1466 pxor %xmm12,%xmm4
1481 movups 96(%rdi),%xmm12
    [all...]
  /art/runtime/arch/x86_64/
registers_x86_64.h 65 XMM12 = 12,
quick_method_frame_info_x86_64.h 39 (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
quick_entrypoints_x86_64.S 23 movq %xmm12, 0(%rsp)
31 movq 0(%rsp), %xmm12
64 movq %xmm12, 8(%rsp)
107 movq %xmm12, 8(%rsp)
128 movq 8(%rsp), %xmm12
182 movq %xmm12, 80(%rsp)
224 movq %xmm12, 80(%rsp)
244 movq 80(%rsp), %xmm12
667 movq 96(%rsi), %xmm12
    [all...]
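
  ART treats xmm12-xmm15 as callee-save FP registers (the masks in quick_method_frame_info_x86_64.h above and calling_convention_x86_64.cc below both include XMM12), and the entrypoint frames spill them with 64-bit movq stores, preserving only the low double of each register. A rough sketch of that spill/reload shape in the same AT&T syntax (frame size and offsets invented for illustration):

      # hypothetical prologue/epilogue mirroring the movq pairs above
      subq    $16, %rsp
      movq    %xmm12, 0(%rsp)          # save low 64 bits of xmm12
      movq    %xmm13, 8(%rsp)          # save low 64 bits of xmm13
      # ...code that may clobber xmm12/xmm13...
      movq    0(%rsp), %xmm12          # reload
      movq    8(%rsp), %xmm13
      addq    $16, %rsp
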
  /external/valgrind/memcheck/tests/amd64/
fxsave-amd64.c 69 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm12");
86 asm __volatile__("movups " VG_SYM(vecZ) "(%rip), %xmm12");
120 asm __volatile__("movaps %xmm1, %xmm12");
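
  Both movups forms in this Valgrind test load the same 16-byte vecZ buffer into xmm12; they differ only in addressing mode. Spelled out in plain AT&T assembly (vecZ is the test's buffer; the comments are the only addition):

      movups  vecZ, %xmm12             # 32-bit absolute address; not usable in PIC/PIE builds
      movups  vecZ(%rip), %xmm12       # RIP-relative; the position-independent form
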
  /art/compiler/jni/quick/x86_64/
calling_convention_x86_64.cc 135 callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM12));
147 return 1 << XMM12 | 1 << XMM13 | 1 << XMM14 | 1 << XMM15;
  /external/libvpx/libvpx/vp8/common/x86/
loopfilter_block_sse2_x86_64.asm 506 movdqa xmm12, xmm1
508 punpckhqdq xmm12, i4
531 movdqa i9, xmm12
541 movdqa xmm12, xmm6
546 LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
743 movdqa xmm12, xmm1
745 punpckhqdq xmm12, i12
768 movdqa s9, xmm12
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
loopfilter_block_sse2.asm 506 movdqa xmm12, xmm1
508 punpckhqdq xmm12, i4
531 movdqa i9, xmm12
541 movdqa xmm12, xmm6
546 LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
743 movdqa xmm12, xmm1
745 punpckhqdq xmm12, i12
768 movdqa s9, xmm12
  /external/boringssl/src/crypto/aes/asm/
vpaes-x86_64.pl 108 movdqa %xmm12, %xmm0 # 0 : sb1t
543 movdqa %xmm12, %xmm0 # 0 : sbot
685 movaps %xmm12,0x70(%rsp)
708 movaps 0x70(%rsp),%xmm12
733 movaps %xmm12,0x70(%rsp)
761 movaps 0x70(%rsp),%xmm12
786 movaps %xmm12,0x70(%rsp)
805 movaps 0x70(%rsp),%xmm12
829 movaps %xmm12,0x70(%rsp)
848 movaps 0x70(%rsp),%xmm12
    [all...]
bsaes-x86_64.pl 998 movdqa %xmm0, %xmm12
1010 pand %xmm6, %xmm12
1013 pcmpeqb %xmm0, %xmm12
1032 movdqa %xmm12, 0x40($out)
1177 movaps %xmm12, 0xa0(%rsp)
1337 movaps 0xa0(%rbp), %xmm12
1378 movaps %xmm12, 0xa0(%rsp)
    [all...]
  /external/boringssl/win-x86_64/crypto/modes/
ghash-x86_64.asm 957 pshufd xmm12,xmm11,78
958 pxor xmm12,xmm11
965 xorps xmm4,xmm12
972 pshufd xmm12,xmm11,78
974 pxor xmm12,xmm11
992 xorps xmm4,xmm12
1000 pshufd xmm12,xmm11,78
1002 pxor xmm12,xmm11
1050 pxor xmm4,xmm12
1051 pshufd xmm12,xmm11,78
    [all...]
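
  The recurring pshufd xmm12,xmm11,78 / pxor xmm12,xmm11 pair in this GHASH code is the usual Karatsuba-style setup for pclmulqdq: immediate 78 (0x4E) selects dwords 2,3,0,1, i.e. it swaps the two 64-bit halves of xmm11, and the xor then leaves (high64 XOR low64) in both halves of xmm12, ready for the middle partial product. Isolated in the same NASM syntax (the xmm7 operand in the last line is a hypothetical register holding the matching hi^lo of the other factor):

      pshufd          xmm12,xmm11,78          ; 78 = 0x4E: swap the two qwords of xmm11
      pxor            xmm12,xmm11             ; xmm12 = hi^lo (in both halves)
      pclmulqdq       xmm12,xmm7,0x00         ; middle Karatsuba product (hypothetical xmm7 = H_hi^H_lo)
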
  /external/valgrind/coregrind/m_gdbserver/
64bit-sse.xml 54 <reg name="xmm12" bitsize="128" type="vec128"/>
  /prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/include/
setjmp.h 138 SETJMP_FLOAT128 Xmm12;
  /external/libvpx/libvpx/third_party/libyuv/source/
rotate.cc 551 "movdqa %%xmm4,%%xmm12 \n"
553 "punpckhbw %%xmm5,%%xmm12 \n"
555 "movdqa %%xmm12,%%xmm13 \n"
590 "punpcklwd %%xmm14,%%xmm12 \n"
592 "movdqa %%xmm12,%%xmm14 \n"
622 "punpckldq %%xmm12,%%xmm8 \n"
624 "movdqa %%xmm8,%%xmm12 \n"
625 "palignr $0x8,%%xmm12,%%xmm12 \n"
626 "movq %%xmm12,(%1,%4) \n
    [all...]
  /external/libyuv/files/source/
rotate.cc 527 "movdqa %%xmm4,%%xmm12 \n"
529 "punpckhbw %%xmm5,%%xmm12 \n"
531 "movdqa %%xmm12,%%xmm13 \n"
566 "punpcklwd %%xmm14,%%xmm12 \n"
568 "movdqa %%xmm12,%%xmm14 \n"
598 "punpckldq %%xmm12,%%xmm8 \n"
600 "movdqa %%xmm8,%%xmm12 \n"
601 "palignr $0x8,%%xmm12,%%xmm12 \n"
602 "movq %%xmm12,(%1,%4) \n
    [all...]
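
  In both rotate.cc copies the transpose code writes a 128-bit register out as two 64-bit halves: movq stores the low eight bytes, then palignr with an 8-byte shift of the register against itself swaps its halves so a second movq can store what was the upper eight bytes. The same steps in plain AT&T assembly (rdi/rsi stand in for the %1/%4 operands of the original inline asm):

      movq    %xmm12, (%rdi)           # low 64 bits -> dst
      palignr $0x8, %xmm12, %xmm12     # rotate so the upper half becomes the low half
      movq    %xmm12, (%rdi,%rsi)      # former upper 64 bits -> dst + stride
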
  /art/compiler/optimizing/
optimizing_cfi_test_expected.inc 122 // 0x0000000d: movsd [rsp + 24], xmm12
126 // 0x00000018: movsd xmm12, [rsp + 24]
  /external/valgrind/none/tests/amd64/
redundantRexW.stdout.exp 21 %xmm12 ................................
47 %xmm12 ................................
73 %xmm12 ................................
99 %xmm12 ................................
125 %xmm12 ................................
151 %xmm12 ................................
177 %xmm12 ................................
203 %xmm12 ................................
229 %xmm12 ................................
255 %xmm12 ................................
    [all...]
  /external/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S 916 pshufd $78,%xmm11,%xmm12
917 pxor %xmm11,%xmm12
924 xorps %xmm12,%xmm4
931 pshufd $78,%xmm11,%xmm12
933 pxor %xmm11,%xmm12
951 xorps %xmm12,%xmm4
959 pshufd $78,%xmm11,%xmm12
961 pxor %xmm11,%xmm12
1009 pxor %xmm12,%xmm4
1010 pshufd $78,%xmm11,%xmm12
    [all...]
  /external/boringssl/mac-x86_64/crypto/modes/
ghash-x86_64.S 915 pshufd $78,%xmm11,%xmm12
916 pxor %xmm11,%xmm12
923 xorps %xmm12,%xmm4
930 pshufd $78,%xmm11,%xmm12
932 pxor %xmm11,%xmm12
950 xorps %xmm12,%xmm4
958 pshufd $78,%xmm11,%xmm12
960 pxor %xmm11,%xmm12
1008 pxor %xmm12,%xmm4
1009 pshufd $78,%xmm11,%xmm12
    [all...]
  /external/llvm/test/MC/Disassembler/X86/
simple-tests.txt 716 # CHECK: vfmadd132ps %xmm11, %xmm12, %xmm10
719 # CHECK: vfmadd132pd %xmm11, %xmm12, %xmm10
728 # CHECK: vfmadd132ps (%rax), %xmm12, %xmm10
731 # CHECK: vfmadd132pd (%rax), %xmm12, %xmm10
740 # CHECK: vfmadd132ss %xmm11, %xmm12, %xmm10
743 # CHECK: vfmadd132sd %xmm11, %xmm12, %xmm10
746 # CHECK: vfmadd132ss (%rax), %xmm12, %xmm10
749 # CHECK: vfmadd132sd (%rax), %xmm12, %xmm10
  /external/llvm/test/CodeGen/X86/
anyregcc.ll 389 ;SSE-NEXT: movaps %xmm12
434 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
458 %a12 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
463 call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3, <2 x double> %a4, <2 x double> %a5, <2 x double> %a6, <2 x double> %a7, <2 x double> %a8, <2 x double> %a9, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15)
  /art/compiler/dex/quick/
quick_cfi_test_expected.inc 120 // 0x0000000e: movsd [rsp + 24], xmm12
131 // 0x0000002d: movsd xmm12, [rsp + 24]

