/art/compiler/jni/quick/x86/ |
calling_convention_x86.cc | 102 // First four float parameters are passed via XMM0..XMM3
|
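The hit above, a comment in ART's x86 JNI quick calling convention, states that the first four float parameters travel in XMM0..XMM3 before the stack is used. Below is a minimal sketch of that assignment rule only; the enum and helper are hypothetical stand-ins, not ART's actual ManagedRegister machinery.

    // Hypothetical illustration of a "first four FP args in XMM0..XMM3,
    // the rest on the stack" rule; not ART's real implementation.
    #include <optional>

    enum class XmmReg { XMM0, XMM1, XMM2, XMM3 };

    std::optional<XmmReg> FloatArgRegister(int fp_arg_index) {
      switch (fp_arg_index) {
        case 0: return XmmReg::XMM0;
        case 1: return XmmReg::XMM1;
        case 2: return XmmReg::XMM2;
        case 3: return XmmReg::XMM3;
        default: return std::nullopt;  // fifth and later FP args spill to the stack
      }
    }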
/art/compiler/optimizing/ |
code_generator_x86_64.h | 35 { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };
|
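The register list above, presumably the floating-point parameter registers used by ART's optimizing code generator on x86-64, matches the eight XMM registers the SysV AMD64 ABI assigns to FP arguments. A small illustrative C++ function, assuming the standard SysV convention: the first eight double parameters arrive in xmm0..xmm7 and the ninth is passed on the stack.

    // Illustrative only: under the SysV AMD64 ABI, a0..a7 arrive in
    // xmm0..xmm7; a8 does not fit in a register and is passed on the stack.
    double SumNine(double a0, double a1, double a2, double a3,
                   double a4, double a5, double a6, double a7,
                   double a8) {
      return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
    }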
/art/runtime/arch/x86/ |
context_x86.cc | 111 "movsd 24(%%ebx), %%xmm3\n\t"
|
quick_entrypoints_x86.S |
    100 movsd %xmm3, 24(%esp)
    140 movsd %xmm3, 24(%esp)
    152 movsd 28(%esp), %xmm3
    175 movsd 24(%esp), %xmm3
    318 movsd 24(%esp), %xmm3
    432 LOOP_OVER_SHORTY_LOADING_XMMS xmm3, esi, edi, al, .Lxmm_setup_finished
    529 LOOP_OVER_SHORTY_LOADING_XMMS xmm3, esi, edi, al, .Lxmm_setup_finished2
    [all...]
/external/google-breakpad/src/third_party/libdisasm/ |
ia32_reg.c | 116 { REG_SIMD_SIZE, reg_simd, 0, "xmm3" },
|
/external/llvm/test/CodeGen/X86/ |
anyregcc.ll |
    398 ;SSE-NEXT: movaps %xmm3
    434 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
    449 %a3 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
    463 call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3, <2 x double> %a4, <2 x double> %a5, <2 x double> %a6, <2 x double> %a7, <2 x double> %a8, <2 x double> %a9, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15)
|
vaargs.ll | 15 ; CHECK-NEXT: vmovaps %xmm3, 96(%rsp)
|
sse41.ll |
    318 ; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
    320 ; X32-NEXT: addss %xmm2, %xmm3
    321 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
    327 ; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
    329 ; X64-NEXT: addss %xmm2, %xmm3
    330 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
    [all...]
vselect.ll | 242 ; CHECK-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm3[0] 267 ; CHECK-NEXT: movaps %xmm3, 48(%rdi)
|
/external/llvm/lib/Target/X86/ |
README.txt |
    515 cvtss2sd LCPI1_1(%rip), %xmm3
    519 movapd %xmm3, %xmm2
    524 We should sink the load into xmm3 into the LBB1_2 block. This should
    1018 movapd %xmm2, %xmm3
    1019 mulsd %xmm3, %xmm3
    1020 movapd %xmm3, %xmm4
    1023 mulsd %xmm3, %xmm4
    1025 mulsd %xmm3, %xmm4
    1027 mulsd %xmm3, %xmm [all...]
/art/runtime/arch/x86_64/ |
quick_entrypoints_x86_64.S |
    177 movq %xmm3, 40(%rsp)
    219 movq %xmm3, 40(%rsp)
    239 movq 40(%rsp), %xmm3
    478 LOOP_OVER_SHORTY_LOADING_XMMS xmm3, .Lxmm_setup_finished
    572 LOOP_OVER_SHORTY_LOADING_XMMS xmm3, .Lxmm_setup_finished2
    658 movq 24(%rsi), %xmm3
    [all...]
/external/boringssl/mac-x86_64/crypto/bn/ |
modexp512-x86_64.S |
    1495 movdqu 48(%rsi),%xmm3
    1503 movdqa %xmm3,592(%rsp)
    1509 movdqu 48(%rdx),%xmm3
    1590 movdqa %xmm3,112(%rsp)
    1702 movdqu 48(%rdx),%xmm3
    1710 movdqa %xmm3,560(%rsp)
|
/external/boringssl/win-x86_64/crypto/bn/ |
modexp512-x86_64.asm |
    1504 movdqu xmm3,XMMWORD PTR[48+rsi]
    1512 movdqa XMMWORD PTR[592+rsp],xmm3
    1518 movdqu xmm3,XMMWORD PTR[48+rdx]
    1599 movdqa XMMWORD PTR[112+rsp],xmm3
    1711 movdqu xmm3,XMMWORD PTR[48+rdx]
    1719 movdqa XMMWORD PTR[560+rsp],xmm3
|
/external/boringssl/linux-x86/crypto/sha/ |
sha256-586.S | [all...] |
/external/boringssl/mac-x86/crypto/sha/ |
sha256-586.S | [all...] |
/art/compiler/dex/quick/x86/ |
quick_assemble_x86_test.cc | 185 Test(kX86, "Pextrw", "pextrw $7, %xmm3, 8(%eax)\n", kX86PextrwMRI,
|
target_x86.cc | [all...] |
/art/compiler/utils/x86/ |
assembler_x86_test.cc | 69 new x86::XmmRegister(x86::XMM3),
|
/external/boringssl/src/crypto/rc4/asm/ |
rc4-md5-x86_64.pl |
    261 $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
    291 pxor %xmm0,%xmm3
    292 pxor %xmm1,%xmm3
    389 #rc4# movdqu %xmm3,16($out,$in0)
|
/external/google-breakpad/src/google_breakpad/common/ |
minidump_cpu_amd64.h | 175 uint128_struct xmm3; member in struct:__anon9467::__anon9468::__anon9469
|
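The breakpad hit above records xmm3 as a uint128_struct member of the AMD64 minidump CPU context. Below is a hedged sketch of reading such a saved 128-bit register back as two 64-bit halves; the structs and field names are stand-ins assumed for illustration, not breakpad's actual MDRawContextAMD64 layout.

    // Stand-in types: a 128-bit register captured as two 64-bit halves in a
    // crash-dump CPU context. Field and type names are assumptions.
    #include <cstdint>
    #include <cstdio>

    struct U128Sketch { uint64_t low; uint64_t high; };
    struct Amd64ContextSketch { U128Sketch xmm3; /* ...other registers... */ };

    void PrintXmm3(const Amd64ContextSketch& ctx) {
      std::printf("xmm3 = 0x%016llx%016llx\n",
                  static_cast<unsigned long long>(ctx.xmm3.high),
                  static_cast<unsigned long long>(ctx.xmm3.low));
    }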
/external/v8/src/ia32/ |
codegen-ia32.cc |
    132 __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
    138 __ movdqa(Operand(dst, 0x30), xmm3);
    417 __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    421 __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    [all...]
/external/llvm/test/MC/Disassembler/X86/ |
x86-32.txt |
    223 # CHECK: vinsertps $129, %xmm3, %xmm2, %xmm1
    337 # CHECK: vblendvps %xmm4, %xmm1, %xmm2, %xmm3
    436 # CHECK: vaddps %xmm3, %xmm7, %xmm0
|
/external/boringssl/linux-x86_64/crypto/sha/ |
sha256-x86_64.S |
    1788 movdqu 48(%rsi),%xmm3
    1800 paddd %xmm3,%xmm7
    1818 movdqa %xmm3,%xmm7
    1848 pshufd $250,%xmm3,%xmm7
    2103 movdqa %xmm3,%xmm4
    [all...]
/external/boringssl/mac-x86_64/crypto/sha/ |
sha256-x86_64.S |
    1787 movdqu 48(%rsi),%xmm3
    1799 paddd %xmm3,%xmm7
    1817 movdqa %xmm3,%xmm7
    1847 pshufd $250,%xmm3,%xmm7
    2102 movdqa %xmm3,%xmm4
    [all...]
/external/boringssl/linux-x86_64/crypto/bn/ |
modexp512-x86_64.S |
    1495 movdqu 48(%rsi),%xmm3
    1503 movdqa %xmm3,592(%rsp)
    1509 movdqu 48(%rdx),%xmm3
    1590 movdqa %xmm3,112(%rsp)
    1702 movdqu 48(%rdx),%xmm3
    1710 movdqa %xmm3,560(%rsp)
|