/external/llvm/lib/Target/X86/

  README-FPStack.txt
      75  movsd 24(%esp), %xmm0
      76  movsd %xmm0, 8(%esp)

  README-SSE.txt
     112  movsd 32(%esp), %xmm1
     113  movsd 16(%esp), %xmm2
     117  movsd %xmm0, %xmm2
     119  movsd %xmm2, (%esp)
     420  movsd 16(%esp), %xmm0
     421  movsd %xmm0, (%esp)
     751  Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
     752  when code size is critical. movlps is slower than movsd on core2 but it's one
     784  movsd %xmm0, (%esp)

  README.txt
     660  movsd 24(%esp), %xmm1
     952  movsd 24(%esp), %xmm0
     953  movsd %xmm0, 8(%esp)
    1048  movsd (%esp), %xmm0
    1050  movsd %xmm0, (%esp)
    1179  movsd 176(%esp), %xmm2
    1197  movsd 152(%esp), %xmm1
    1199  movsd %xmm1, 152(%esp)
    1204  movsd 152(%esp), %xmm0
    1207  movsd %xmm0, 152(%esp [all...]
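The note quoted from README-SSE.txt above weighs movlps against movsd when lowering (scalar_to_vector (loadf64)): movsd xmm, m64 encodes as F2 0F 10 /r while movlps xmm, m64 encodes as 0F 12 /r, so dropping the F2 prefix saves one byte, at the cost of movlps merging into the destination instead of zeroing its upper lane (and being slower on Core 2, per the note). A minimal C++ intrinsics sketch of the two alternatives; it assumes GCC/Clang actually lower these intrinsics to movsd/movlps (typical but not guaranteed), and the function names are illustrative only:

    #include <emmintrin.h>   // SSE2: _mm_load_sd, _mm_castps_pd
    #include <xmmintrin.h>   // SSE:  _mm_loadl_pi, _mm_setzero_ps

    // Typically lowered to "movsd (mem), %xmm": F2 0F 10 /r, upper lane zeroed.
    __m128d load_scalar_movsd(const double* p) {
      return _mm_load_sd(p);
    }

    // Typically lowered to "movlps (mem), %xmm": 0F 12 /r, one byte shorter,
    // but only the low 64 bits are written; for scalar_to_vector the upper
    // lane is a don't-care, so any merge source (here zero) is acceptable.
    __m128d load_scalar_movlps(const double* p) {
      __m128 lo = _mm_loadl_pi(_mm_setzero_ps(),
                               reinterpret_cast<const __m64*>(p));
      return _mm_castps_pd(lo);
    }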
/external/valgrind/main/none/tests/amd64/

  redundantRexW.c
     462  /* movsd mem, reg f2 48 0f 10 11 rex.W movsd (%rcx),%xmm2 */
     476  after_test( "rex.W movsd (%rcx),%xmm2", regs, mem );
     479  /* movsd reg, mem f2 48 0f 11 3f rex.W movsd %xmm7,(%rdi) */
     493  after_test( "rex.W movsd %xmm7,(%rdi)", regs, mem );
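The valgrind test above checks that a REX.W prefix on movsd is decoded as a no-op: movsd always moves 64 bits, so setting W changes nothing, and f2 48 0f 10 11 must execute exactly like f2 0f 10 11. A hedged sketch of the same trick the test uses (hand-encoding the prefixed instruction with .byte); it assumes an x86-64 target with GCC/Clang extended asm, and the helper name is made up:

    #include <cstdio>

    // Execute "rex.W movsd (%rcx), %xmm2" (f2 48 0f 10 11), then copy the
    // loaded value out through an ordinary movsd store.
    static double load_with_redundant_rexw(const double* p) {
      double out;
      __asm__ volatile(
          ".byte 0xf2, 0x48, 0x0f, 0x10, 0x11\n\t"  // rex.W movsd (%rcx), %xmm2
          "movsd %%xmm2, %0"
          : "=m"(out)
          : "c"(p)            // pin the source pointer to %rcx
          : "xmm2", "memory");
      return out;
    }

    int main() {
      double d = 3.25;
      std::printf("%f\n", load_with_redundant_rexw(&d));  // prints 3.250000
    }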
/external/v8/src/x64/

  codegen-x64.cc
     147  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
     148  __ movsd(Operand(rsp, kPointerSize), xmm0);
     187  __ movsd(xmm0, Operand(rsp, kPointerSize));
     193  __ movsd(xmm0, Operand(rsp, kPointerSize));
     347  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),

  code-stubs-x64.cc
     274  __ movsd(xmm0, FieldOperand(rcx, i));
     275  __ movsd(FieldOperand(rdx, i), xmm0);
     [all...]
/art/compiler/utils/x86/

  assembler_x86.h
     277  void movsd(XmmRegister dst, const Address& src);
     278  void movsd(const Address& dst, XmmRegister src);
     279  void movsd(XmmRegister dst, XmmRegister src);

  assembler_x86.cc
     400  void X86Assembler::movsd(XmmRegister dst, const Address& src) {   [function in class:art::x86::X86Assembler]
     409  void X86Assembler::movsd(const Address& dst, XmmRegister src) {   [function in class:art::x86::X86Assembler]
     418  void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {   [function in class:art::x86::X86Assembler]
    1248  movsd(dst, Address(ESP, 0));
    [all...]
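assembler_x86.h above declares three movsd overloads on ART's x86 assembler: memory-to-register load, register-to-memory store, and register-to-register copy. A hedged sketch of how backend code might drive them, based only on the signatures shown; the include path, the register constants (XMM0, XMM1, ESP), and the helper function are assumptions, not a verbatim ART snippet:

    #include "assembler_x86.h"  // ART-internal header (assumed path)

    namespace art {
    namespace x86 {

    // One call per overload listed above: store xmm0 to the stack slot at
    // (ESP), copy it into xmm1, then reload xmm0 from the same slot.
    void EmitDoubleShuffle(X86Assembler* assembler) {
      assembler->movsd(Address(ESP, 0), XMM0);  // movsd(const Address&, XmmRegister)
      assembler->movsd(XMM1, XMM0);             // movsd(XmmRegister, XmmRegister)
      assembler->movsd(XMM0, Address(ESP, 0));  // movsd(XmmRegister, const Address&)
    }

    }  // namespace x86
    }  // namespace art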
/dalvik/vm/arch/x86/

  Call386ABI.S
     126  movsd
/dalvik/vm/mterp/x86/

  OP_FILLED_NEW_ARRAY.S
      82  movsd
/external/chromium_org/v8/test/cctest/

  test-code-stubs-x64.cc
      84  __ movsd(MemOperand(rsp, 0), xmm0);

  test-disasm-x64.cc
     355  __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
     356  __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
/external/qemu-pc-bios/vgabios/tests/lfbprof/

  lfbprof.h
     136  "rep movsd" \
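The lfbprof.h hit, like the bare movsd lines in the Dalvik .S files above, is a different instruction from the SSE2 one: with no XMM operands, movsd is the legacy "move string doubleword", and rep movsd copies (E/R)CX doublewords from (E/R)SI to (E/R)DI. A small sketch using GCC/Clang extended asm; the function name is illustrative, and std::memcpy is the portable equivalent:

    #include <cstddef>

    // Copy n_dwords 32-bit words from src to dst with "rep movsd"
    // (GNU as also spells the operand-less form "movsl").
    // The "+D"/"+S"/"+c" constraints pin dst, src and the count to the
    // registers rep movsd implicitly uses.
    static void copy_dwords(void* dst, const void* src, std::size_t n_dwords) {
      __asm__ volatile("rep movsd"
                       : "+D"(dst), "+S"(src), "+c"(n_dwords)
                       :
                       : "memory");
    }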
/external/valgrind/main/memcheck/tests/amd64/

  sse_memory.stdout.exp
    [all...]

/external/valgrind/main/memcheck/tests/x86/

  sse2_memory.stdout.exp
    [all...]
/external/chromium_org/v8/src/x64/

  code-stubs-x64.cc
     633  __ movsd(xmm0, mantissa_operand);
     830  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
     889  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
     [all...]

  lithium-codegen-x64.cc
     208  __ movsd(MemOperand(rsp, count * kDoubleSize),
     [all...]
/external/elfutils/tests/

  testfile44.expect.bz2
/external/libffi/src/x86/

  darwin64.S
     151  movsd %xmm0, (%rdi)
     279  movsd -24(%rsp), %xmm0

  unix64.S
     154  movsd %xmm0, (%rdi)
     289  movsd -24(%rsp), %xmm0
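In both libffi trampolines above, movsd shuttles a double between %xmm0 and memory; %xmm0 is the register the x86-64 SysV and Darwin calling conventions use to return a double, so the stubs have to spill and reload it around their bookkeeping. A trivial illustration (hypothetical function, nothing libffi-specific):

    // Built with any x86-64 C++ compiler, the return value of this function
    // is left in %xmm0 -- the same register the assembly above saves with
    // "movsd %xmm0, (%rdi)" and restores with "movsd -24(%rsp), %xmm0".
    extern "C" double scale_by_half(double x) {
      return x * 0.5;
    }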
/external/v8/test/cctest/

  test-disasm-x64.cc
     362  __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
     363  __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
/art/runtime/arch/x86/

  quick_entrypoints_x86.S
     281  movsd %xmm0, (%ecx)    // store the floating point result
     436  movsd (%esp), %xmm0    // place into %xmm0
     459  movsd (%esp), %xmm0    // place into %xmm0
     [all...]
/external/chromium_org/v8/src/ia32/

  assembler-ia32.h
    [all...]
/external/libvpx/libvpx/examples/includes/geshi/geshi/

  asm.php
      72  'jecxz','lfs','lgs','lodsd','loopd','looped','loopned','loopnzd','loopzd','lss','movsd',
/external/llvm/test/CodeGen/X86/

  fast-isel-x86-64.ll
     269  ; CHECK: movsd LCPI