/external/v8/src/ia32/

codegen-ia32.cc
    120  __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
    121  __ sqrtsd(xmm0, xmm0);
    122  __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
    191  __ movdqu(xmm0, Operand(src, 0));
    192  __ movdqu(Operand(dst, 0), xmm0);
    216  __ movdqa(xmm0, Operand(src, 0x00));
    220  __ movdqa(Operand(dst, 0x00), xmm0);
    232  __ movdqa(xmm0, Operand(src, 0));
    234  __ movdqa(Operand(dst, 0), xmm0);
    241  __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    266  __ movdqa(Operand(dst, 0x00), xmm0);
    280  __ movdqa(Operand(dst, 0), xmm0);
    287  __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    456  xmm0);
    582  __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
    ...
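The codegen-ia32.cc hits at lines 120-122 are V8's SSE2 square-root stub: the double argument is loaded from the stack into xmm0, sqrtsd is applied, and the result is written back (movdbl is the macro assembler's double move, effectively movsd). A minimal sketch of the same sequence, assuming a made-up stack offset rather than V8's real frame layout (GNU as, AT&T syntax):

    movsd   4(%esp), %xmm0      # load the caller's double from its stack slot
    sqrtsd  %xmm0, %xmm0        # xmm0 <- sqrt(xmm0)
    movsd   %xmm0, 4(%esp)      # store the result back into the same slot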
lithium-gap-resolver-ia32.cc
    344  // We rely on having xmm0 available as a fixed scratch register.
    346  __ movdbl(xmm0, src);
    347  __ movdbl(dst, xmm0);
    413  // XMM register-register swap. We rely on having xmm0
    417  __ movaps(xmm0, src);
    419  __ movaps(dst, xmm0);
    422  // XMM register-memory swap. We rely on having xmm0
    430  __ movdbl(xmm0, other);
    432  __ movdbl(reg, Operand(xmm0));
    436  // purpose temporary register and also rely on having xmm0 available a
    ...
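The gap-resolver comments above explain the recurring pattern: ia32 has no memory-to-memory move, so V8's lithium gap resolver keeps xmm0 free as a fixed scratch register for double moves and swaps. A hedged sketch of the two shapes, with placeholder stack operands (AT&T syntax):

    # memory -> memory double move, staged through the xmm0 scratch
    movsd   -8(%ebp), %xmm0     # load the source slot
    movsd   %xmm0, -16(%ebp)    # store into the destination slot

    # XMM register <-> register swap, again via xmm0 (there is no xchg for XMM)
    movaps  %xmm1, %xmm0        # save xmm1
    movaps  %xmm2, %xmm1        # xmm1 <- xmm2
    movaps  %xmm0, %xmm2        # xmm2 <- old xmm1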
/external/valgrind/main/none/tests/amd64/

redundantRexW.c
    109  "\tmovupd 0(%%r14), %%xmm0\n" \
    127  "\tmovupd %%xmm0, 0(%%r14)\n" \
    145  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", \
    242  /* movaps mem, reg 48 0f 28 42 30 rex.W movaps 0x30(%rdx),%xmm0 */
    256  after_test( "movaps 0x30(%rdx),%xmm0", regs, mem );
    293  /* movhpd mem, reg 66 48 0f 16 06 rex.W movhpd (%rsi),%xmm0 */
    307  after_test( "rex.W movhpd (%rsi),%xmm0", regs, mem );
    310  /* movhpd reg, mem 66 48 0f 17 07 rex.W movhpd %xmm0,(%rdi) */
    324  after_test( "rex.W movhpd %xmm0,(%rdi)", regs, mem );
    343  /* movhps reg, mem 49 0f 17 03 rex.WB movhps %xmm0,(%r11) *
    ...
/bionic/libc/arch-x86/string/

ssse3-memcpy5.S
    204  movdqu (%eax), %xmm0
    239  movdqu %xmm0, (%esi)
    246  movdqa (%eax, %edi), %xmm0
    249  movdqa %xmm0, (%edx, %edi)
    254  movdqa (%eax, %edi), %xmm0
    257  movdqa %xmm0, (%edx, %edi)
    262  movdqa (%eax, %edi), %xmm0
    265  movdqa %xmm0, (%edx, %edi)
    270  movdqa (%eax, %edi), %xmm0
    273  movdqa %xmm0, (%edx, %edi
    ...
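ssse3-memcpy5.S uses xmm0 both for the unaligned head copy (movdqu) and for the aligned 16-byte block copies (movdqa) that make up most of these hits. A reduced sketch of that style of aligned inner loop, assuming %eax = src, %edx = dst, %edi = offset and %ecx = a byte count that is a multiple of 16 (the real routine unrolls far more aggressively):

    copy_blocks:
        movdqa  (%eax, %edi), %xmm0     # load 16 aligned bytes from src + offset
        movdqa  %xmm0, (%edx, %edi)     # store them to dst + offset
        addl    $16, %edi               # advance the offset
        subl    $16, %ecx               # one block done
        jnz     copy_blocks             # loop until the count reaches zero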
/external/llvm/lib/Target/X86/

X86CallingConv.td
    37  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
    41  CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
    62  // case they use XMM0, otherwise it is the same as the common X86 calling
    65  CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
    72  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
    76  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    77  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
    90  // The X86-64 calling convention always returns FP values in XMM0.
    91  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
    92  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>
    ...
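These calling-convention entries state the rule most of the other hits depend on: on x86-64, f32/f64 results always come back in XMM0 (XMM1 for a second value), while 32-bit fastcc only uses XMM0-XMM2 when SSE2 is available. A tiny 64-bit illustration, with a hypothetical function name (GNU as):

        .text
        .globl  one_half
    one_half:                            # double one_half(void)
        movsd   .Lhalf(%rip), %xmm0      # the f64 return value travels in xmm0
        ret

        .section .rodata
    .Lhalf: .double 0.5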
X86CompilationCallback_Win64.asm
    35  movaps [rsp +32], xmm0
    51  movaps xmm0, [rsp +32]
/external/libffi/src/x86/

darwin64.S
    147  movss %xmm0, (%rdi)
    151  movsd %xmm0, (%rdi)
    165  movd %xmm0, %r10
    190  movdqa 48(%r10), %xmm0
    275  movss -24(%rsp), %xmm0
    279  movsd -24(%rsp), %xmm0
    287  /* There are four possibilities here, %rax/%rdx, %xmm0/%rax,
    288     %rax/%xmm0, %xmm0/%xmm1. We collapse two by always loading
    290     bit 8 set means xmm0 gets the second word, and bit 9 mean
    ...
unix64.S
    150  movss %xmm0, (%rdi)
    154  movsd %xmm0, (%rdi)
    169  movd %xmm0, %r10
    194  movdqa 48(%r10), %xmm0
    285  movss -24(%rsp), %xmm0
    289  movsd -24(%rsp), %xmm0
    298  /* There are four possibilities here, %rax/%rdx, %xmm0/%rax,
    299     %rax/%xmm0, %xmm0/%xmm1. We collapse two by always loading
    301     bit 8 set means xmm0 gets the second word, and bit 9 mean
    ...
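The comments in both libffi variants describe the return-value dispatch: a two-eightbyte result can come back as %rax/%rdx, %xmm0/%rax, %rax/%xmm0, or %xmm0/%xmm1 depending on how the SysV ABI classifies each half, and flag bits select which store sequence runs. For reference, the classification behind the %xmm0/%rax case looks like this (hypothetical function, GNU as):

        .text
        .globl  make_pair
    make_pair:                           # struct { double d; long n; } make_pair(void)
        movsd   .Lone(%rip), %xmm0       # first eightbyte is SSE class  -> returned in xmm0
        movl    $42, %eax                # second eightbyte is INTEGER   -> returned in rax
        ret

        .section .rodata
    .Lone:  .double 1.0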
/external/llvm/test/TableGen/

MultiPat.td
    50  def XMM0: Register<"xmm0">;
    68  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
/external/valgrind/main/VEX/test/

fxsave.c
    37  asm __volatile__("movups vecZ, %xmm0");
    61  asm __volatile__("movups vec0, %xmm0");
    69  asm __volatile__("xorps %xmm0, %xmm7");
/external/valgrind/main/memcheck/tests/x86/

fxsave.c
    38  asm __volatile__("movups " VG_SYM(vecZ) ", %xmm0");
    62  asm __volatile__("movups " VG_SYM(vec0) ", %xmm0");
    70  asm __volatile__("xorps %xmm0, %xmm7");
/external/llvm/test/MC/X86/

x86-32.s
    265  // CHECK: cmpps $0, %xmm0, %xmm1
    267  cmpps $0, %xmm0, %xmm1
    271  // CHECK: cmppd $0, %xmm0, %xmm1
    273  cmppd $0, %xmm0, %xmm1
    277  // CHECK: cmpss $0, %xmm0, %xmm1
    279  cmpss $0, %xmm0, %xmm1
    283  // CHECK: cmpsd $0, %xmm0, %xmm1
    285  cmpsd $0, %xmm0, %xmm1
    292  // CHECK: cmpps $0, %xmm0, %xmm1
    294  cmpeqps %xmm0, %xmm
    ...
x86-64.s
    516  cvtsd2siq %xmm0, %rax   // CHECK: cvtsd2siq %xmm0, %rax
    517  cvtsd2sil %xmm0, %eax   // CHECK: cvtsd2sil %xmm0, %eax
    518  cvtsd2si %xmm0, %rax    // CHECK: cvtsd2siq %xmm0, %rax
    521  cvttpd2dq %xmm1, %xmm0  // CHECK: cvttpd2dq %xmm1, %xmm0
    522  cvttpd2dq (%rax), %xmm0 // CHECK: cvttpd2dq (%rax), %xmm0
    ...
/bionic/libm/amd64/

s_remquo.S
    35  movsd %xmm0,-8(%rsp)
    64  movsd -8(%rsp),%xmm0
s_remquof.S
    35  movss %xmm0,-4(%rsp)
    64  movss -4(%rsp),%xmm0
s_lrint.S
    35  cvtsd2si %xmm0, %rax
s_lrintf.S
    35  cvtss2si %xmm0, %rax
/external/clang/test/CodeGen/

asm-variable.c
    33  register double b0 asm("xmm0");
    59  // CHECK: call i64 asm "call *$1", "={rax},r,{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},~{dirflag},~{fpsr},~{flags}
/external/llvm/test/CodeGen/X86/

2008-10-27-CoalescerBug.ll
    18  ; CHECK: divsd %xmm0
    19  ; CHECK: movsd %xmm0, 16(%esp)
movgs.ll
    50  ; X32: pmovsxwd %gs:(%eax), %xmm0
    54  ; X64: pmovsxwd %gs:([[A0]]), %xmm0
widen_load-1.ll
    7   ; CHECK: movaps compl+128(%rip), %xmm0
    8   ; CHECK: movaps %xmm0, (%rsp)
2008-08-25-AsmRegTypeMismatch.ll
    16  call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},~{dirflag},~{fpsr},~{flags}"( double undef, double undef, double undef, double 1.0, double undef, double 0.0, double undef, double 0.0 ) nounwind
/external/v8/src/x64/

lithium-gap-resolver-x64.cc
    230  __ movsd(xmm0, src);
    231  __ movsd(cgen_->ToOperand(destination), xmm0);
    269  __ movsd(xmm0, src);
    271  __ movsd(dst, xmm0);
    278  __ movaps(xmm0, source_reg);
    280  __ movaps(destination_reg, xmm0);
    292  __ movsd(xmm0, other_operand);
    294  __ movsd(reg, xmm0);
/dalvik/vm/mterp/x86-atom/

OP_DIV_DOUBLE_2ADDR.S
    34  fldl (rFP, %edx, 4) # %xmm0<- vA
OP_DIV_FLOAT_2ADDR.S
    34  flds (rFP, %ecx, 4) # %xmm0<- vA