    Searched full:movsd (Results 26 - 50 of 320) sorted by null

1 2 3 4 5 6 7 8 9 10 11 >>

  /external/llvm/test/MC/ELF/
merge.s 7 movsd .Lfoo(%rip), %xmm1
13 movsd .Lfoo+4(%rip), %xmm1
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-avx-swap.s 21 movsd %xmm8,%xmm6
53 movsd xmm6,xmm8
prefix.d 20 [ ]*[a-f0-9]+: f2 66 0f 11 22 data16 movsd %xmm4,\(%edx\)
21 [ ]*[a-f0-9]+: f2 67 66 0f 11 22 data16 movsd %xmm4,\(%bp,%si\)
22 [ ]*[a-f0-9]+: f2 67 f0 66 0f 11 22 lock data16 movsd %xmm4,\(%bp,%si\)
25 [ ]*[a-f0-9]+: f3 67 f2 66 0f 11 22 repz data16 movsd %xmm4,\(%bp,%si\)
27 [ ]*[a-f0-9]+: f2 66 36 0f 11 22 data16 movsd %xmm4,%ss:\(%edx\)
28 [ ]*[a-f0-9]+: f3 f0 f2 66 36 0f 11 22 repz lock data16 movsd %xmm4,%ss:\(%edx\)
29 [ ]*[a-f0-9]+: f2 66 3e 36 0f 11 22 data16 ds movsd %xmm4,%ss:\(%edx\)
30 [ ]*[a-f0-9]+: f2 67 66 3e 36 0f 11 22 data16 ds movsd %xmm4,%ss:\(%bp,%si\)
31 [ ]*[a-f0-9]+: f2 67 f0 66 3e 36 0f 11 22 lock data16 ds movsd %xmm4,%ss:\(%bp,%si\)
34 [ ]*[a-f0-9]+: f3 67 f2 66 3e 36 0f 11 22 repz data16 ds movsd %xmm4,%ss:\(%bp,%si\
    [all...]
prefix.s 25 # data16 movsd %xmm4,(%edx)
32 # data16 movsd %xmm4,(%bp,%si)
40 # lock data16 movsd %xmm4,(%bp,%si)
65 # repz data16 movsd %xmm4,(%bp,%si)
82 # data16 movsd %xmm4,%ss:(%edx)
90 # repz lock data16 movsd %xmm4,%ss:(%edx)
100 # data16 ds movsd %xmm4,%ss:(%edx)
109 # data16 ds movsd %xmm4,%ss:(%bp,%si)
119 # lock data16 ds movsd %xmm4,%ss:(%bp,%si)
149 # repz data16 ds movsd %xmm4,%ss:(%bp,%si
    [all...]
  /external/llvm/test/CodeGen/X86/
i64-mem-copy.ll 5 ; Use movq or movsd to load / store i64 values if sse2 is available.
19 ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
20 ; X32-NEXT: movsd %xmm0, (%eax)
71 ; X32: movsd {{.*#+}} xmm0 = mem[0],zero
72 ; X32-NEXT: movsd %xmm0, (%eax)
memcpy-2.ll 15 ; SSE2-Darwin: movsd _.str+16, %xmm0
16 ; SSE2-Darwin: movsd %xmm0, 16(%esp)
22 ; SSE2-Mingw32: movsd _.str+16, %xmm0
23 ; SSE2-Mingw32: movsd %xmm0, 16(%esp)
96 ; SSE2-Darwin: movsd (%ecx), %xmm0
97 ; SSE2-Darwin: movsd 8(%ecx), %xmm1
98 ; SSE2-Darwin: movsd %xmm1, 8(%eax)
99 ; SSE2-Darwin: movsd %xmm0, (%eax)
102 ; SSE2-Mingw32: movsd (%ecx), %xmm0
103 ; SSE2-Mingw32: movsd 8(%ecx), %xmm
    [all...]
vec_set-7.ll 1 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1
vselect-2.ll 8 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
23 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
37 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
52 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
catchpad-realign-savexmm.ll 41 ; CHECK: movsd fp_global(%rip), %xmm6 # xmm6 = mem[0],zero
44 ; CHECK: movsd %xmm6, fp_global(%rip)
peephole-fold-movsd.ll 3 ; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
13 ; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
uint_to_fp-2.ll 8 ; CHECK-NEXT: movsd .LCPI0_0, %xmm0
30 ; CHECK-NEXT: movsd .LCPI1_0, %xmm0
sse-fcopysign.ll 29 ; X32: movsd {{.*#+}} xmm0 = mem[0],zero
33 ; X32-NEXT: movsd %xmm0, (%esp)
34 ; X32-NEXT: movsd %xmm1, 8(%esp)
81 ; X32-NEXT: movsd 8(%ebp), %xmm1 {{.*#+}} xmm1 = mem[0],zero
125 ; X64: movsd .LCPI5_0(%rip), %xmm0 {{.*#+}} xmm0 = mem[0],zero
pr3154.ll 32 call void asm sideeffect "movsd $0, %xmm7 \0A\09movapd ff_pd_1, %xmm6 \0A\09movapd ff_pd_2, %xmm5 \0A\09movlhps %xmm7, %xmm7 \0A\09subpd %xmm5, %xmm7 \0A\09addsd %xmm6, %xmm7 \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* %c) nounwind
84 %asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addpd %xmm3, %xmm2 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09movhlps %xmm2, %xmm5 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09addsd %xmm5, %xmm2 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09movsd %xmm2, $3 \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %28, double* %29, double* %30, double* %21, double* %27, i32 %22) nounwind ; <i32> [#uses=0]
92 %asmtmp34 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\091: \0A\09movapd ($3,$0), %xmm3 \0A\09movupd -8($4,$0), %xmm4 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd ($4,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm3, %xmm0 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09", "=&r,=*m,=*m,r,r,0,~{ (…)
    [all...]
2008-06-13-NotVolatileLoadStore.ll 5 ; CHECK-NOT: movsd
2008-06-13-VolatileLoadStore.ll 1 ; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movsd | count 5
arg-cast.ll 3 ; RUN: llc < %s | not grep movsd
  /prebuilts/go/darwin-x86/src/math/
dim_amd64.s 37 MOVSD x+0(FP), X0
39 MOVSD $(0.0), X1
41 MOVSD X0, ret+16(FP)
77 MOVSD X0, ret+16(FP)
121 MOVSD X0, ret+16(FP)
  /prebuilts/go/linux-x86/src/math/
dim_amd64.s 37 MOVSD x+0(FP), X0
39 MOVSD $(0.0), X1
41 MOVSD X0, ret+16(FP)
77 MOVSD X0, ret+16(FP)
121 MOVSD X0, ret+16(FP)
  /bionic/libm/x86/
e_hypot.S 95 movsd 160(%esp), %xmm0
96 movsd 168(%esp), %xmm1
148 movsd %xmm0, 32(%esp)
149 movsd %xmm1, 40(%esp)
156 movsd 160(%esp), %xmm0
157 movsd 168(%esp), %xmm1
164 movsd %xmm0, 32(%esp)
165 movsd %xmm1, 40(%esp)
rint.S 30 movsd 16(%esp),%xmm0
32 movsd %xmm0,(%esp)
e_exp.S 92 movsd 128(%esp), %xmm0
168 movsd %xmm0, 8(%esp)
170 movsd %xmm6, 16(%esp)
174 movsd %xmm4, 8(%esp)
178 movsd 8(%esp), %xmm0
202 movsd %xmm0, (%esp)
203 movsd 128(%esp), %xmm0
212 movsd 1208(%ebx), %xmm0
217 movsd 1216(%ebx), %xmm0
230 movsd 1192(%ebx), %xmm
    [all...]
e_atan2.S 106 movsd 136(%esp), %xmm1
107 movsd 128(%esp), %xmm0
176 movsd 2944(%ebx), %xmm2
177 movsd 2960(%ebx), %xmm3
209 movsd %xmm1, (%esp)
227 movsd 2944(%ebx), %xmm1
228 movsd 2960(%ebx), %xmm5
247 movsd %xmm5, (%esp)
251 movsd 16(%esp), %xmm1
252 movsd 8(%esp), %xmm
    [all...]
libm_tancot_huge.S 66 movsd 8(%ebp), %xmm1
72 movsd _Pi4Inv@GOTOFF(%esi), %xmm0
74 movsd %xmm1, 8(%ebp)
75 movsd %xmm0, (%esp)
83 movsd 8(%ebp), %xmm1
109 movsd %xmm1, 16(%esp)
160 movsd 8(%ebp), %xmm0
164 movsd %xmm0, 16(%eax)
278 movsd 16(%esp), %xmm0
281 movsd 16(%esp), %xmm
    [all...]
  /bionic/libm/x86_64/
e_hypot.S 77 movsd %xmm0, 48(%rsp)
78 movsd %xmm1, 56(%rsp)
130 movsd %xmm0, 32(%rsp)
131 movsd %xmm1, 40(%rsp)
142 movsd %xmm0, 32(%rsp)
143 movsd %xmm1, 40(%rsp)
  /external/v8/src/crankshaft/x64/
lithium-gap-resolver-x64.cc 218 __ Movsd(cgen_->ToOperand(destination), src);
223 __ Movsd(cgen_->ToDoubleRegister(destination), src);
226 __ Movsd(xmm0, src);
227 __ Movsd(cgen_->ToOperand(destination), xmm0);
267 __ Movsd(xmm0, src);
269 __ Movsd(dst, xmm0);
291 __ Movsd(reg, other_operand);
292 __ Movsd(other_operand, xmm0);
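
The hits above all use movsd in its SSE2 scalar-double sense (a single 64-bit value moved through the low half of an XMM register), not the legacy string-move instruction. As a minimal C sketch of that load/store pairing, using the SSE2 intrinsics from <emmintrin.h> -- the helper name copy_f64_via_xmm is illustrative only and does not come from any of the indexed files:

    /* Illustrative sketch, not taken from the indexed sources: the 8-byte
     * load/store pattern that the movsd pairs above implement -- load 64 bits
     * into the low half of an XMM register (upper half zeroed), then store
     * those 64 bits back to memory. */
    #include <emmintrin.h>   /* SSE2 intrinsics */

    static void copy_f64_via_xmm(const double *src, double *dst)
    {
        __m128d v = _mm_load_sd(src);   /* typically emits: movsd (src), %xmm0 */
        _mm_store_sd(dst, v);           /* typically emits: movsd %xmm0, (dst) */
    }

On 32-bit x86 this requires building with SSE2 enabled (matching the +sse2 / -mattr=+sse2 flags in the LLVM tests above); on x86-64, SSE2 is part of the baseline ABI.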
