/art/compiler/utils/x86/ |
assembler_x86.cc |
    491 void X86Assembler::movsd(XmmRegister dst, const Address& src) { function in class:art::x86::X86Assembler
    500 void X86Assembler::movsd(const Address& dst, XmmRegister src) { function in class:art::x86::X86Assembler
    509 void X86Assembler::movsd(XmmRegister dst, XmmRegister src) { function in class:art::x86::X86Assembler
    [all...]
assembler_x86.h |
    390 void movsd(XmmRegister dst, const Address& src);
    391 void movsd(const Address& dst, XmmRegister src);
    392 void movsd(XmmRegister dst, XmmRegister src);
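
Taken together, the .h declarations and .cc definitions above show the three operand shapes ART supports: load, store, and register-to-register. A minimal standalone sketch of the bytes involved, assuming a hypothetical Emit helper over a plain byte buffer (an illustration of the SSE2 encoding confirmed by the binutils disassembly tests further down, not ART's actual implementation):

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> buf;                  // stands in for the assembler buffer
    void Emit(uint8_t b) { buf.push_back(b); }

    // movsd xmm, xmm (register form): F2 0F 10 /r, ModRM.reg = dst, ModRM.rm = src.
    void movsd_reg_reg(int dst, int src) {
      Emit(0xF2); Emit(0x0F); Emit(0x10);
      Emit(0xC0 | (dst << 3) | src);           // mod = 11 selects a register operand
    }

The memory forms differ only in the opcode byte: F2 0F 10 loads (xmm <- m64) and F2 0F 11 stores (m64 <- xmm); ModRM/SIB emission for the address is elided here.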
|
/external/llvm/test/CodeGen/X86/ |
vector-shift-lshr-128.ll |
    23 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
    68 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
    86 ; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
    95 ; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
    164 ; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
    173 ; X32-SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
    708 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
    749 ; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
    763 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
    768 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
    [all...]
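
In these checks movsd has two register operands and the comment pattern `xmm2 = xmm0[0],xmm2[1]`: the register form replaces only the destination's low 64-bit lane and preserves its high lane, so LLVM's SSE2 lowering uses it as a cheap two-lane blend rather than a plain move. A small intrinsics illustration of that merge semantics (my example, not from the test file):

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
      __m128d a = _mm_set_pd(2.0, 1.0);        // a = {1.0, 2.0} (low lane first)
      __m128d b = _mm_set_pd(4.0, 3.0);        // b = {3.0, 4.0}
      // _mm_move_sd compiles to the register form of movsd:
      // result = {b[0], a[1]}, i.e. low lane from b, high lane kept from a.
      __m128d r = _mm_move_sd(a, b);
      double out[2];
      _mm_storeu_pd(out, r);
      std::printf("%f %f\n", out[0], out[1]);  // prints 3.000000 2.000000
    }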
/external/elfutils/tests/ |
testfile44.expect.bz2 |
/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ |
sse2avx.s |
    444 movsd (%ecx),%xmm4
    451 movsd %xmm4,(%ecx)
    647 movsd %xmm4,%xmm6
    1105 movsd xmm4,QWORD PTR [ecx]
    1112 movsd QWORD PTR [ecx],xmm4
    1308 movsd xmm6,xmm4
|
x86-64-sse2avx.s |
    444 movsd (%rcx),%xmm4
    451 movsd %xmm4,(%rcx)
    690 movsd %xmm4,%xmm6
    1148 movsd xmm4,QWORD PTR [rcx]
    1155 movsd QWORD PTR [rcx],xmm4
    1394 movsd xmm6,xmm4
|
opts.d |
    140 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    141 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
    244 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    245 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
|
x86-64-opts-intel.d |
    177 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd xmm6,xmm4
    178 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s xmm6,xmm4
    299 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd xmm6,xmm4
    300 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s xmm6,xmm4
|
x86-64-opts.d |
    176 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    177 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
    298 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    299 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
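
The opts tests above exercise both encodings of a register-to-register movsd: F2 0F 10 (the default RM form) and F2 0F 11 (the MR store form, which this binutils prints with a .s suffix). Both byte sequences name the same operands because the ModRM reg and rm roles swap between the two opcodes; a small decoder sketch makes that concrete (hypothetical helper, mod == 3 register case only):

    #include <cstdint>
    #include <cstdio>

    // For opcode 0F 10 (RM form) ModRM.reg is the destination;
    // for opcode 0F 11 (MR form) ModRM.reg is the source.
    void decode(uint8_t opcode, uint8_t modrm) {
      int reg = (modrm >> 3) & 7;                // ModRM.reg field
      int rm  = modrm & 7;                       // ModRM.rm field (register, mod == 3)
      int dst = (opcode == 0x10) ? reg : rm;
      int src = (opcode == 0x10) ? rm  : reg;
      std::printf("movsd %%xmm%d,%%xmm%d\n", src, dst);
    }

    int main() {
      decode(0x10, 0xf4);  // f2 0f 10 f4 -> movsd %xmm4,%xmm6
      decode(0x11, 0xe6);  // f2 0f 11 e6 -> movsd.s form, same operands
    }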
|
x86-64-simd-intel.d |
    81 [ ]*[a-f0-9]+: f2 0f 10 00 movsd xmm0,QWORD PTR \[rax\]
    82 [ ]*[a-f0-9]+: f2 0f 11 00 movsd QWORD PTR \[rax\],xmm0
    200 [ ]*[a-f0-9]+: f2 0f 10 00 movsd xmm0,QWORD PTR \[rax\]
    201 [ ]*[a-f0-9]+: f2 0f 11 00 movsd QWORD PTR \[rax\],xmm0
|
x86-64-simd-suffix.d |
    81 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    82 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
    200 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    201 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
|
x86-64-simd.d |
    80 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    81 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
    199 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    200 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
|
sse2.d |
    102 [ ]*[a-f0-9]+: f2 0f 10 ee movsd %xmm6,%xmm5
    103 [ ]*[a-f0-9]+: f2 0f 11 3e movsd %xmm7,\(%esi\)
    104 [ ]*[a-f0-9]+: f2 0f 10 38 movsd \(%eax\),%xmm7
|
/art/compiler/optimizing/ |
optimizing_cfi_test_expected.inc |
    120 // 0x00000006: movsd [rsp + 32], xmm13
    122 // 0x0000000d: movsd [rsp + 24], xmm12
    126 // 0x00000018: movsd xmm12, [rsp + 24]
    128 // 0x0000001f: movsd xmm13, [rsp + 32]
|
intrinsics_x86.cc |
    117 __ movsd(temp, input.AsFpuRegister<XmmRegister>());
    136 __ movsd(output.AsFpuRegister<XmmRegister>(), temp1);
    286 __ movsd(temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF), constant_area));
    473 __ movsd(out, codegen->LiteralInt64Address(kDoubleNaN, constant_area));
    481 __ movsd(out, Address(ESP, 0));
    494 __ movsd(out, op2);
    865 __ movsd(Address(ESP, 0), XMM0);
    869 __ movsd(Address(ESP, 8), XMM1);
    877 __ movsd(XMM0, Address(ESP, 0));
    [all...]
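
The constant loaded at line 286 is the classic branch-free fabs mask: 0x7FFFFFFFFFFFFFFF keeps every bit of an IEEE-754 double except the sign bit, so AND-ing with it yields the absolute value (ART presumably pairs the load with an andpd, which this excerpt does not show). An intrinsics illustration of what the constant does (my example, not ART's code):

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Broadcast the mask into both 64-bit lanes, then clear the sign bit.
      __m128d mask = _mm_castsi128_pd(_mm_set1_epi64x(INT64_C(0x7FFFFFFFFFFFFFFF)));
      __m128d x  = _mm_set_sd(-3.5);
      __m128d ax = _mm_and_pd(x, mask);
      std::printf("%f\n", _mm_cvtsd_f64(ax));  // prints 3.500000
    }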
/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ilp32/ |
x86-64-opts-intel.d |
    177 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd xmm6,xmm4
    178 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s xmm6,xmm4
    299 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd xmm6,xmm4
    300 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s xmm6,xmm4
|
x86-64-opts.d |
    177 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    178 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
    299 [ ]*[a-f0-9]+: f2 0f 10 f4 movsd %xmm4,%xmm6
    300 [ ]*[a-f0-9]+: f2 0f 11 e6 movsd.s %xmm4,%xmm6
|
x86-64-simd-intel.d |
    81 [ ]*[a-f0-9]+: f2 0f 10 00 movsd xmm0,QWORD PTR \[rax\]
    82 [ ]*[a-f0-9]+: f2 0f 11 00 movsd QWORD PTR \[rax\],xmm0
    200 [ ]*[a-f0-9]+: f2 0f 10 00 movsd xmm0,QWORD PTR \[rax\]
    201 [ ]*[a-f0-9]+: f2 0f 11 00 movsd QWORD PTR \[rax\],xmm0
|
x86-64-simd-suffix.d |
    81 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    82 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
    200 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    201 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
|
x86-64-simd.d |
    81 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    82 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
    200 [ ]*[a-f0-9]+: f2 0f 10 00 movsd \(%rax\),%xmm0
    201 [ ]*[a-f0-9]+: f2 0f 11 00 movsd %xmm0,\(%rax\)
|
/art/compiler/utils/x86_64/ |
assembler_x86_64.cc |
    563 void X86_64Assembler::movsd(XmmRegister dst, const Address& src) { function in class:art::x86_64::X86_64Assembler
    573 void X86_64Assembler::movsd(const Address& dst, XmmRegister src) { function in class:art::x86_64::X86_64Assembler
    583 void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) { function in class:art::x86_64::X86_64Assembler
    586 EmitOptionalRex32(src, dst); // Movsd is MR encoding instead of the usual RM.
    [all...]
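
Line 586's comment is the interesting detail: for the register-to-register overload, ART emits the MR (store) opcode F2 0F 11 rather than the usual RM form, so ModRM.reg carries the source and the optional REX prefix must be computed with the operands swapped. A sketch of why the (src, dst) order follows from that choice (helper names modeled on the excerpt above, not the verbatim ART source):

    // MR form F2 0F 11: ModRM.reg = src (extended by REX.R),
    // ModRM.rm = dst (extended by REX.B) -- the reverse of the RM form,
    // hence EmitOptionalRex32(src, dst) rather than (dst, src).
    void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) {
      EmitUint8(0xF2);
      EmitOptionalRex32(src, dst);       // reg operand first, rm operand second
      EmitUint8(0x0F);
      EmitUint8(0x11);                   // store-form opcode reused for reg-to-reg
      EmitXmmRegisterOperand(src, dst);  // ModRM: reg = src, rm = dst
    }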
/bionic/libm/x86_64/ |
e_sinh.S |
    88 movsd HALFMASK(%rip), %xmm3
    90 movsd L2E(%rip), %xmm1
    91 movsd 8+L2E(%rip), %xmm2
    94 movsd Shifter(%rip), %xmm6
    183 movsd 48+cv(%rip), %xmm7
|
/external/v8/src/ia32/ |
code-stubs-ia32.cc |
    80 __ movsd(Operand(esp, i * kDoubleSize), reg);
    95 __ movsd(reg, Operand(esp, i * kDoubleSize));
    282 __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
    300 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
    358 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    374 __ movsd(double_exponent,
    382 __ movsd(double_exponent,
    479 __ movsd(Operand(esp, 0), double_exponent);
    481 __ movsd(Operand(esp, 0), double_base);
    505 __ movsd(double_result, Operand(esp, 0));
    [all...]
/external/v8/src/x64/ |
macro-assembler-x64.cc |
    781 Movsd(Operand(rsp, i * kDoubleSize), reg);
    794 Movsd(reg, Operand(rsp, i * kDoubleSize));
    [all...]
code-stubs-x64.cc |
    135 __ Movsd(kScratchDoubleReg, mantissa_operand);
    192 __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
    199 __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    243 __ Movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    259 __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
    266 __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
    360 __ Movsd(Operand(rsp, 0), double_exponent);
    362 __ Movsd(Operand(rsp, 0), double_base);
    385 __ Movsd(double_result, Operand(rsp, 0));
    400 __ Movsd(double_scratch, double_base); // Back up base
    [all...]
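
Note the capitalization difference from the ia32 file: lowercase movsd is the raw instruction, while the capitalized Movsd in these x64 files is a macro-assembler wrapper that picks the VEX-encoded AVX form when the CPU supports it. A rough sketch of the dispatch idea, with hypothetical signatures rather than V8's exact code:

    void MacroAssembler::Movsd(XMMRegister dst, Operand src) {
      if (CpuFeatures::IsSupported(AVX)) {
        vmovsd(dst, src);   // VEX encoding; avoids SSE/AVX transition penalties
      } else {
        movsd(dst, src);    // legacy SSE2 encoding (F2 0F 10)
      }
    }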