    Searched full:movsd (Results 201 - 225 of 320)

  /external/v8/src/ia32/
assembler-ia32.h     [all...]
  /external/v8/src/x64/
codegen-x64.cc 215 __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
macro-assembler-x64.h     [all...]
  /external/valgrind/docs/internals/
3_3_BUGSTATUS.txt 222 171645 Fixd vx1869 Unrecognised instruction (MOVSD, non-binutils
  /external/valgrind/memcheck/tests/amd64/
sse_memory.c 310 TEST_INSN( &AllMask, 8,movsd)
  /bionic/libm/x86_64/
s_cos.S 184 movsd %xmm0, 8(%rsp)
581 movsd 8(%rsp), %xmm0
e_log.S 64 movsd %xmm0, (%rsp)
e_log10.S 64 movsd %xmm0, (%rsp)
s_atan.S 67 movsd %xmm0, (%rsp)
s_log1p.S 64 movsd %xmm0, 8(%rsp)
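The bionic libm matches above all use movsd to spill the incoming double argument into a stack slot and reload it later. Below is a minimal standalone sketch of the same round-trip, written with SSE2 intrinsics rather than bionic's hand-written assembly (the local variable slot merely stands in for 8(%rsp)):

    /* Sketch only, not bionic code: movsd-style store and reload of a double. */
    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        double slot;                       /* plays the role of 8(%rsp) */
        __m128d x = _mm_set_sd(3.5);
        _mm_store_sd(&slot, x);            /* like: movsd %xmm0, 8(%rsp) */
        __m128d y = _mm_load_sd(&slot);    /* like: movsd 8(%rsp), %xmm0 */
        printf("%f\n", _mm_cvtsd_f64(y));  /* prints 3.500000 */
        return 0;
    }

On typical compilers _mm_store_sd and _mm_load_sd lower to the memory forms of movsd shown in the matches.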
  /external/llvm/test/MC/X86/
intel-syntax.s 76 // CHECK: movsd -8, %xmm5
77 movsd XMM5, QWORD PTR [-8]
x86-32-coverage.s     [all...]
  /prebuilts/go/darwin-x86/pkg/bootstrap/src/bootstrap/compile/internal/amd64/
peep.go 262 // MOVSD removal.
263 // We never use packed registers, so a MOVSD between registers
  /prebuilts/go/darwin-x86/src/cmd/compile/internal/amd64/
peep.go 259 // MOVSD removal.
260 // We never use packed registers, so a MOVSD between registers
  /prebuilts/go/linux-x86/pkg/bootstrap/src/bootstrap/compile/internal/amd64/
peep.go 262 // MOVSD removal.
263 // We never use packed registers, so a MOVSD between registers
  /prebuilts/go/linux-x86/src/cmd/compile/internal/amd64/
peep.go 259 // MOVSD removal.
260 // We never use packed registers, so a MOVSD between registers
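The peep.go comment quoted in these four copies states the peephole's premise: the Go compiler only ever consumes the low lane of an XMM register, so a register-to-register MOVSD may be treated as a full-register copy and elided or rewritten. A small sketch of that low-lane equivalence, expressed with C SSE2 intrinsics rather than the compiler's own IR (all values are arbitrary):

    /* Sketch only: a reg-to-reg movsd (_mm_move_sd) and a full-register copy
     * agree on the low double, which is all a scalar-only consumer reads. */
    #include <assert.h>
    #include <emmintrin.h>

    int main(void) {
        __m128d dst = _mm_set_pd(1.0, 2.0);  /* high = 1.0, low = 2.0 */
        __m128d src = _mm_set_pd(3.0, 4.0);  /* high = 3.0, low = 4.0 */

        __m128d via_movsd = _mm_move_sd(dst, src);  /* low from src, high kept from dst */
        __m128d via_full_copy = src;                /* what a MOVAPD-style copy yields */

        /* A scalar (low-lane) consumer cannot tell the two apart: both give 4.0. */
        assert(_mm_cvtsd_f64(via_movsd) == _mm_cvtsd_f64(via_full_copy));
        return 0;
    }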
  /external/llvm/test/CodeGen/X86/
vec_fp_to_int.ll 168 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
219 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
272 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
323 ; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
409 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
    [all...]
sse2.ll 184 ; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
vector-shuffle-128-v4.ll     [all...]
  /external/valgrind/none/tests/amd64/
insn_sse2.def 118 movsd xmm.pd[1234.5678,8765.4321] xmm.pd[1111.1111,2222.2222] => 1.pd[1234.5678,2222.2222]
119 movsd m64.pd[1234.5678] xmm.pd[1111.1111,2222.2222] => 1.pd[1234.5678,0.0]
120 movsd xmm.pd[1234.5678,8765.4321] m64.pd[1111.1111] => 1.pd[1234.5678]
    [all...]
  /external/valgrind/none/tests/x86/
insn_sse2.def 118 movsd xmm.pd[1234.5678,8765.4321] xmm.pd[1111.1111,2222.2222] => 1.pd[1234.5678,2222.2222]
119 movsd m64.pd[1234.5678] xmm.pd[1111.1111,2222.2222] => 1.pd[1234.5678,0.0]
120 movsd xmm.pd[1234.5678,8765.4321] m64.pd[1111.1111] => 1.pd[1234.5678]
    [all...]
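The insn_sse2.def lines above (identical in the amd64 and x86 suites) encode movsd's three forms: register-to-register merges only the low double into the destination, a load from m64 fills the low double and zeroes the high one, and a store to m64 writes only the low double. A minimal sketch of the same three cases with SSE2 intrinsics, reusing the def file's values (this is not valgrind's test harness):

    /* Sketch only: the three movsd behaviours exercised by insn_sse2.def. */
    #include <assert.h>
    #include <emmintrin.h>

    int main(void) {
        __m128d src = _mm_set_pd(8765.4321, 1234.5678);  /* low = 1234.5678 */
        __m128d dst = _mm_set_pd(2222.2222, 1111.1111);  /* low = 1111.1111 */

        /* movsd xmm, xmm: low copied from src, high preserved from dst. */
        __m128d r1 = _mm_move_sd(dst, src);
        assert(_mm_cvtsd_f64(r1) == 1234.5678);
        assert(_mm_cvtsd_f64(_mm_unpackhi_pd(r1, r1)) == 2222.2222);

        /* movsd m64, xmm: loads the low double and zeroes the high double. */
        double mem = 1234.5678;
        __m128d r2 = _mm_load_sd(&mem);
        assert(_mm_cvtsd_f64(r2) == 1234.5678);
        assert(_mm_cvtsd_f64(_mm_unpackhi_pd(r2, r2)) == 0.0);

        /* movsd xmm, m64: stores only the low double. */
        double out;
        _mm_store_sd(&out, src);
        assert(out == 1234.5678);
        return 0;
    }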
  /external/llvm/lib/Target/X86/
X86ISelLowering.h 384 MOVSD,
865 // expensive than a straight movsd. On the other hand, it's important to
    [all...]
X86InstrSSE.td 542 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
544 // movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
546 // don't use movss/movsd for copies.
607 defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
615 defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
755 // MOVSD to the lower bits.
    [all...]
  /art/compiler/optimizing/
intrinsics_x86_64.cc 220 __ movsd(xmm_temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF)));
357 __ movsd(out, codegen->LiteralInt64Address(INT64_C(0x7FF8000000000000)));
366 __ movsd(out, op2);
684 __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
    [all...]
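The ART intrinsics above use movsd to materialise bit-pattern constants in XMM registers: 0x7FFFFFFFFFFFFFFF is the mask that clears a double's sign bit (Math.abs), 0x7FF8000000000000 is the canonical quiet NaN, and 0.5 feeds the round intrinsic. A minimal sketch of the abs-by-mask idea, not ART's actual code (abs_via_mask is a hypothetical helper name):

    /* Sketch only: fabs via AND-ing away the sign bit, using the same
     * constant that intrinsics_x86_64.cc loads with movsd. */
    #include <assert.h>
    #include <emmintrin.h>

    static double abs_via_mask(double x) {
        /* 0x7FFFFFFFFFFFFFFF: every bit except the sign bit. */
        const __m128d sign_mask =
            _mm_castsi128_pd(_mm_set1_epi64x(0x7FFFFFFFFFFFFFFFLL));
        return _mm_cvtsd_f64(_mm_and_pd(_mm_set_sd(x), sign_mask));
    }

    int main(void) {
        assert(abs_via_mask(-2.5) == 2.5);
        assert(abs_via_mask(2.5) == 2.5);
        return 0;
    }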
  /external/llvm/docs/CommandGuide/
FileCheck.rst 16 (for example, a movsd from esp or whatever is interesting). This is similar to
