/external/llvm/test/CodeGen/X86/ |
vec_set-8.ll |
    3 ; CHECK-NOT: movsd
    5 ; CHECK-NOT: movsd
|
volatile.ll |
    1 ; RUN: llc < %s -march=x86 -mattr=sse2 | grep movsd | count 5
    2 ; RUN: llc < %s -march=x86 -mattr=sse2 -O0 | grep -v esp | grep movsd | count 5
|
fp-stack-direct-ret.ll | 2 ; RUN: llc < %s -march=x86 -mcpu=yonah | not grep movsd
|
lsr-static-addr.ll |
    5 ; CHECK: movsd .LCPI0_0(%rip), %xmm0
    8 ; CHECK-NEXT: movsd A(,%rax,8)
    10 ; CHECK-NEXT: movsd
    13 ; ATOM: movsd .LCPI0_0(%rip), %xmm0
    17 ; ATOM-NEXT: movsd A(,%rax,8)
    19 ; ATOM-NEXT: movsd
|
i64-mem-copy.ll |
    6 ; X32: movsd (%eax), %xmm
    8 ; Uses movsd to load / store i64 values if sse2 is available.
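The comment at line 8 names the technique this test pins down: with SSE2, an i64 memory-to-memory copy can go through an XMM register as one 64-bit load plus one 64-bit store, instead of a pair of 32-bit mov loads and stores. A minimal sketch of the pattern (registers are illustrative, not the test's checked output):

    # copy the i64 at (%eax) to (%edx) through an XMM register
    movsd   (%eax), %xmm0       # 64-bit load; the FP interpretation of the bits is irrelevant
    movsd   %xmm0, (%edx)       # 64-bit store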
|
rip-rel-address.ll |
    12 ; PIC64: movsd _a(%rip), %xmm0
    13 ; STATIC64: movsd a(%rip), %xmm0
|
compare_folding.ll | 2 ; RUN: grep movsd | count 1
|
fastcc-2.ll | 1 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep movsd
|
2012-07-10-extload64.ll |
    6 ; CHECK: movsd
    30 ;CHECK: movsd
|
vec_set-7.ll | 1 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1
|
gather-addresses.ll |
    12 ; CHECK: movsd ([[P:%rdi|%rcx]],%rax,8), %xmm0
    16 ; CHECK: movsd ([[P]],%rax,8), %xmm1
|
pr3154.ll |
    32 call void asm sideeffect "movsd $0, %xmm7 \0A\09movapd ff_pd_1, %xmm6 \0A\09movapd ff_pd_2, %xmm5 \0A\09movlhps %xmm7, %xmm7 \0A\09subpd %xmm5, %xmm7 \0A\09addsd %xmm6, %xmm7 \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* %c) nounwind
    84 %asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addpd %xmm3, %xmm2 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09movhlps %xmm2, %xmm5 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09addsd %xmm5, %xmm2 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09movsd %xmm2, $3 \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %28, double* %29, double* %30, double* %21, double* %27, i32 %22) nounwind ; <i32> [#uses=0]
    92 %asmtmp34 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\091: \0A\09movapd ($3,$0), %xmm3 \0A\09movupd -8($4,$0), %xmm4 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd ($4,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm3, %xmm0 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09", "=&r,=*m,=*m,r,r,0,~{ (…) [all...]
2008-06-13-NotVolatileLoadStore.ll | 1 ; RUN: llc < %s -march=x86 | not grep movsd
|
2008-06-13-VolatileLoadStore.ll | 1 ; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movsd | count 5
|
/frameworks/compile/libbcc/runtime/lib/i386/ |
floatdidf.S |
    27 movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
    31 movsd %xmm0, 4(%esp)
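The twop52 constant is the usual exponent-bias trick for integer-to-double conversion: for 0 <= lo < 2^32, OR-ing lo into the mantissa of the bit pattern of 2^52 yields exactly the double 2^52 + lo, so subtracting 2^52 recovers (double)lo with no rounding. A hedged reconstruction of the routine's shape (only the two quoted movsd lines are verbatim; twop32 and the exact ordering are assumptions):

    movss    4(%esp), %xmm0           // low 32 bits of the i64, loaded as raw bits
    movsd    REL_ADDR(twop52), %xmm2  // 0x1.0p52 (quoted above)
    orpd     %xmm2, %xmm0             // bit pattern of 2^52 + lo
    subsd    %xmm2, %xmm0             // (double)lo, exact
    cvtsi2sd 8(%esp), %xmm1           // (double)hi, signed
    mulsd    REL_ADDR(twop32), %xmm1  // hi * 0x1.0p32 (twop32 is an assumed sibling constant)
    addsd    %xmm1, %xmm0             // combine the halves
    movsd    %xmm0, 4(%esp)           // spill: the i386 ABI returns doubles in ST(0)
    fldl     4(%esp)
    ret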
|
floatundixf.S |
    31 movsd %xmm1, 4(%esp)
    33 movsd %xmm0, 4(%esp)
|
/dalvik/vm/mterp/x86/ |
OP_MUL_DOUBLE.S | 7 # TODO: movsd?
|
OP_SUB_DOUBLE.S | 7 # TODO: movsd?
|
OP_MUL_DOUBLE_2ADDR.S | 8 # TODO: movsd?
|
OP_SUB_DOUBLE_2ADDR.S | 8 # TODO: movsd?
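The four TODOs above appear to ask the same thing: whether the interpreter's 64-bit operand moves for double arithmetic should use the FP-domain movsd. On SSE2 hardware movsd and an integer-domain 64-bit move copy the same 8 bytes, but movsd keeps the value in the floating-point domain next to mulsd/subsd, avoiding a potential domain-crossing stall. A hedged sketch of a double multiply written that way (rFP/rINST follow mterp's register-alias conventions; this is illustrative, not the files' actual code):

    movsd   (rFP,%eax,4), %xmm0     # xmm0 <- vBB
    mulsd   (rFP,%ecx,4), %xmm0     # xmm0 <- vBB * vCC
    movsd   %xmm0, (rFP,rINST,4)    # vAA <- result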
|
/external/compiler-rt/lib/i386/ |
floatdidf.S |
    27 movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
    31 movsd %xmm0, 4(%esp)
|
floatundixf.S |
    31 movsd %xmm1, 4(%esp)
    33 movsd %xmm0, 4(%esp)
|
/bionic/libm/amd64/ |
s_remquo.S |
    35 movsd %xmm0,-8(%rsp)
    36 movsd %xmm1,-16(%rsp)
    64 movsd -8(%rsp),%xmm0
|
s_scalbn.S |
    31 movsd %xmm0,-8(%rsp)
    38 movsd -8(%rsp),%xmm0
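Both libm routines show the same ABI bridge: the amd64 SysV ABI passes and returns doubles in XMM registers, while the remainder/scaling operations used here live on the x87 stack, so each entry point spills its XMM arguments into the red zone with movsd, computes in x87, and movsd's the result back into %xmm0. A condensed sketch of the scalbn shape (hedged reconstruction; only the quoted movsd lines are verbatim):

    movsd   %xmm0, -8(%rsp)     # spill x into the red zone
    movl    %edi, -12(%rsp)     # n arrives in %edi
    fildl   -12(%rsp)           # ST(0) = n
    fldl    -8(%rsp)            # ST(0) = x, ST(1) = n
    fscale                      # ST(0) = x * 2^n
    fstp    %st(1)              # drop n, keep the result
    fstpl   -8(%rsp)            # store the result
    movsd   -8(%rsp), %xmm0     # return value back in %xmm0
    ret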
|
/external/v8/src/x64/ |
lithium-gap-resolver-x64.cc |
    222 __ movsd(cgen_->ToOperand(destination), src);
    227 __ movsd(cgen_->ToDoubleRegister(destination), src);
    230 __ movsd(xmm0, src);
    231 __ movsd(cgen_->ToOperand(destination), xmm0);
    269 __ movsd(xmm0, src);
    271 __ movsd(dst, xmm0);
    292 __ movsd(xmm0, other_operand);
    293 __ movsd(other_operand, reg);
    294 __ movsd(reg, xmm0);
|