    Searched full:movsd (Results 51 - 75 of 165)


  /external/chromium_org/v8/src/ia32/
deoptimizer-ia32.cc 257 __ movsd(Operand(esp, offset), xmm_reg);
309 __ movsd(xmm0, Operand(esp, src_offset));
310 __ movsd(Operand(ebx, dst_offset), xmm0);
395 __ movsd(xmm_reg, Operand(ebx, src_offset));
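
Aside: lines 309-310 above show the standard idiom for moving a double between two memory slots. x86 has no memory-to-memory move, so the value bounces through a scratch XMM register. A hypothetical standalone equivalent of that idiom (x86-64 GCC/Clang inline asm; the function name is made up):

    static void copy_double_slot(double* dst, const double* src) {
      __asm__ ("movsd (%1), %%xmm0 \n\t"  // load 8 bytes into the scratch register
               "movsd %%xmm0, (%0)"       // store them into the destination slot
               : : "r"(dst), "r"(src) : "xmm0", "memory");
    }
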
lithium-codegen-ia32.cc 140 __ movsd(MemOperand(esp, count * kDoubleSize),
157 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
    [all...]
code-stubs-ia32.h 444 masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
456 masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
  /external/llvm/test/MC/Disassembler/X86/
intel-syntax.txt 9 # CHECK: movsd
  /external/llvm/test/MC/X86/
intel-syntax-encoding.s 59 movsd XMM5, QWORD PTR [-8]
  /external/llvm/lib/Target/X86/
README-FPStack.txt 75 movsd 24(%esp), %xmm0
76 movsd %xmm0, 8(%esp)
README-SSE.txt 112 movsd 32(%esp), %xmm1
113 movsd 16(%esp), %xmm2
117 movsd %xmm0, %xmm2
119 movsd %xmm2, (%esp)
420 movsd 16(%esp), %xmm0
421 movsd %xmm0, (%esp)
751 Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
752 when code size is critical. movlps is slower than movsd on core2 but it's one
784 movsd %xmm0, (%esp)
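
Aside: the README-SSE.txt note on lines 751-752 is about encoding size. Both instructions load 8 bytes into the low half of an XMM register, but movsd carries a mandatory F2 prefix that movlps lacks, so movlps is one byte shorter (per the Intel SDM: f2 0f 10 00 for movsd (%eax),%xmm0 versus 0f 12 00 for movlps (%eax),%xmm0). A hypothetical standalone check that both read the same scalar (x86-64 GCC/Clang):

    #include <cassert>

    int main() {
      double v = 1.5, a, b;
      __asm__ ("movsd  (%2), %%xmm0 \n\t movsd %%xmm0, %0 \n\t"  // F2-prefixed form
               "movlps (%2), %%xmm1 \n\t movsd %%xmm1, %1"       // one byte shorter
               : "=m"(a), "=m"(b) : "r"(&v) : "xmm0", "xmm1");
      assert(a == 1.5 && b == 1.5);  // the loaded low lane is identical
      return 0;
    }

One caveat: movsd from memory zeroes the destination's upper lane while movlps leaves it unchanged, which is harmless for scalar_to_vector since the upper bits are undefined there anyway.
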
  /external/valgrind/main/none/tests/amd64/
redundantRexW.c 462 /* movsd mem, reg f2 48 0f 10 11 rex.W movsd (%rcx),%xmm2 */
476 after_test( "rex.W movsd (%rcx),%xmm2", regs, mem );
479 /* movsd reg, mem f2 48 0f 11 3f rex.W movsd %xmm7,(%rdi) */
493 after_test( "rex.W movsd %xmm7,(%rdi)", regs, mem );
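
Aside: what this test distills to is that the 0x48 (REX.W) byte is a redundant prefix on movsd, so the prefixed encoding quoted in the comment on line 462 must behave exactly like the plain instruction. A minimal standalone version of that check (x86-64 GCC/Clang only; the byte sequence is the one from the comment above):

    #include <cstdio>

    int main() {
      double src = 3.5, plain, prefixed;
      __asm__ ("movsd (%1), %%xmm2 \n\t movsd %%xmm2, %0"
               : "=m"(plain) : "r"(&src) : "xmm2");
      __asm__ (".byte 0xf2, 0x48, 0x0f, 0x10, 0x11 \n\t"  // rex.W movsd (%rcx),%xmm2
               "movsd %%xmm2, %0"
               : "=m"(prefixed) : "c"(&src) : "xmm2");    // "c" pins &src in %rcx
      std::printf("%f %f\n", plain, prefixed);            // expect: 3.500000 3.500000
      return 0;
    }
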
  /external/chromium_org/v8/test/cctest/
test-assembler-ia32.cc 267 __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
268 __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
275 __ movsd(Operand(esp, 0), xmm0);
316 __ movsd(Operand(esp, 0), xmm0);
578 __ movsd(xmm1, Operand(esp, 4));
test-code-stubs-x64.cc 87 __ movsd(MemOperand(rsp, 0), xmm0);
test-disasm-ia32.cc 385 __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
386 __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
test-disasm-x64.cc 365 __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
366 __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
  /external/v8/src/x64/
codegen-x64.cc 147 __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
148 __ movsd(Operand(rsp, kPointerSize), xmm0);
187 __ movsd(xmm0, Operand(rsp, kPointerSize));
193 __ movsd(xmm0, Operand(rsp, kPointerSize));
347 __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
code-stubs-x64.cc 274 __ movsd(xmm0, FieldOperand(rcx, i));
275 __ movsd(FieldOperand(rdx, i), xmm0);
    [all...]
  /art/compiler/utils/x86/
assembler_x86.h 277 void movsd(XmmRegister dst, const Address& src);
278 void movsd(const Address& dst, XmmRegister src);
279 void movsd(XmmRegister dst, XmmRegister src);
assembler_x86.cc 400 void X86Assembler::movsd(XmmRegister dst, const Address& src) {
409 void X86Assembler::movsd(const Address& dst, XmmRegister src) {
418 void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {
1248 movsd(dst, Address(ESP, 0));
    [all...]
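
Aside: the three overloads declared in assembler_x86.h cover the reg<-mem, mem<-reg, and reg<-reg moves. A sketch of what emitters like these typically write for the register-to-register form, going by the Intel SDM encoding F2 0F 10 /r (the buffer and register types below are illustrative, not ART's actual classes):

    #include <cstdint>
    #include <vector>

    struct Xmm { std::uint8_t code; };  // xmm0..xmm7 -> codes 0..7

    // movsd dst, src (register form): F2 0F 10 /r with ModRM mod = 11
    void EmitMovsdRegReg(std::vector<std::uint8_t>& buf, Xmm dst, Xmm src) {
      buf.push_back(0xF2);  // mandatory prefix selecting the scalar-double form
      buf.push_back(0x0F);
      buf.push_back(0x10);  // 0x11 instead for the store (mem<-reg) direction
      buf.push_back(0xC0 | (dst.code << 3) | src.code);  // mod=11, reg=dst, rm=src
    }
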
  /external/chromium_org/v8/src/x64/
deoptimizer-x64.cc 149 __ movsd(Operand(rsp, offset), xmm_reg);
279 __ movsd(xmm_reg, Operand(rbx, src_offset));
  /dalvik/vm/arch/x86/
Call386ABI.S 126 movsd
  /dalvik/vm/mterp/x86/
OP_FILLED_NEW_ARRAY.S 82 movsd
  /external/qemu-pc-bios/vgabios/tests/lfbprof/
lfbprof.h 136 "rep movsd" \
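
Aside: the bare movsd here and in the two Dalvik files above is not the SSE2 scalar move but the string instruction (opcode A5): with a REP prefix it copies ECX doublewords from [ESI] to [EDI]. An illustrative wrapper (x86 GCC/Clang; "rep movsl" is AT&T's spelling of the same "rep movsd", and the function name is made up):

    #include <cstddef>

    static void copy_dwords(void* dst, const void* src, std::size_t n) {
      __asm__ volatile ("rep movsl"                      // AT&T name for "rep movsd"
                        : "+D"(dst), "+S"(src), "+c"(n)  // EDI/ESI/ECX operands
                        :
                        : "memory");
    }
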
  /external/valgrind/main/memcheck/tests/amd64/
sse_memory.stdout.exp     [all...]
  /external/valgrind/main/memcheck/tests/x86/
sse2_memory.stdout.exp     [all...]
  /external/elfutils/tests/
testfile44.expect.bz2 
  /external/libffi/src/x86/
darwin64.S 151 movsd %xmm0, (%rdi)
279 movsd -24(%rsp), %xmm0
unix64.S 154 movsd %xmm0, (%rdi)
289 movsd -24(%rsp), %xmm0

