/external/llvm/test/CodeGen/X86/ |
2009-06-03-Win64SpillXMM.ll | 3 ; CHECK: movaps %xmm8, (%rsp) 8 tail call void asm sideeffect "", "~{xmm7},~{xmm8},~{dirflag},~{fpsr},~{flags}"() nounwind
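The LLVM test above pins down a Win64 ABI detail: xmm6-xmm15 are callee-saved under the Win64 calling convention, so an inline-asm statement that merely names xmm8 as clobbered forces the compiler to spill it to the stack (the "movaps %xmm8, (%rsp)" the CHECK line expects). A minimal C sketch of the same idea, assuming a GCC/Clang-style toolchain targeting x86_64 Windows; the function name is illustrative, not from the tree:

    /* Sketch: an empty asm statement whose clobber list claims xmm7 and xmm8.
     * When the target ABI treats those registers as callee-saved, the compiler
     * has to save and restore them around the function body. */
    void clobber_xmm8(void)
    {
        __asm__ __volatile__("" : : : "xmm7", "xmm8");
    }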
|
/external/valgrind/main/memcheck/tests/amd64/ |
xor-undef-amd64.c |
     67  "movups 16(%0), %%xmm8\n\t"
     68  "xorps %%xmm8, %%xmm0\n\t"
     73  : : "r"(junk) : "rax", "xmm8", "xmm0", "cc", "memory"
     79  "movups 16(%0), %%xmm8\n\t"
     85  : : "r"(junk) : "rax", "xmm8", "xmm0", "cc", "memory"
     94  "movups 16(%0), %%xmm8\n\t"
     95  "pxor %%xmm8, %%xmm0\n\t"
    100  : : "r"(junk) : "rax", "xmm8", "xmm0", "cc", "memory"
    106  "movups 16(%0), %%xmm8\n\t"
    112  : : "r"(junk) : "rax", "xmm8", "xmm0", "cc", "memory [all...] |
fxsave-amd64.c | 30 asm __volatile__("movaps %xmm1, %xmm8");
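The valgrind tests above exercise xmm8 through GCC extended asm; xor-undef-amd64.c in particular repeats one pattern: load data into %xmm8, fold it into %xmm0, and name every touched register in the clobber list so the compiler knows its contents are gone. A stripped-down sketch of that pattern, assuming at least 32 readable bytes behind the pointer; the function name is illustrative:

    /* Sketch of the movups/xorps sequence from xor-undef-amd64.c. */
    void xor_into_xmm0(const unsigned char *junk)
    {
        __asm__ __volatile__(
            "movups 16(%0), %%xmm8\n\t"   /* load 16 bytes into xmm8 */
            "xorps  %%xmm8, %%xmm0\n\t"   /* fold them into xmm0     */
            : /* no outputs */
            : "r"(junk)
            : "xmm8", "xmm0", "cc", "memory");
    }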
|
/external/llvm/test/MC/AsmParser/ |
directive_seh.s | 30 movups %xmm8, (%rsp) 31 .seh_savexmm %xmm8, 0
|
/external/llvm/test/TableGen/ |
Slice.td | 49 def XMM8: Register<"xmm8">; 60 XMM8, XMM9, XMM10, XMM11,
|
TargetInstrSpec.td | 50 def XMM8: Register<"xmm8">; 61 XMM8, XMM9, XMM10, XMM11,
|
cast.td | 49 def XMM8: Register<"xmm8">; 60 XMM8, XMM9, XMM10, XMM11,
|
/external/llvm/test/MC/COFF/ |
seh.s | 34 movups %xmm8, (%rsp) 35 .seh_savexmm %xmm8, 0
|
/external/openssl/crypto/ |
x86_64cpuid.pl | 190 pxor %xmm8,%xmm8
|
/external/llvm/lib/Target/X86/ |
X86InstrControl.td |
    144  XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    183  XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    219  XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
|
X86RegisterInfo.td | 179 def XMM8: Register<"xmm8">, DwarfRegNum<[25, -2, -2]>; 199 def YMM8: RegisterWithSubRegs<"ymm8", [XMM8]>, DwarfRegAlias<XMM8>;
|
X86RegisterInfo.cpp |
    105  case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
    349  X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    415  // XMM8, XMM9, ...
    416  assert(X86::XMM15 == X86::XMM8+7);
    417  for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
|
X86MCCodeEmitter.cpp |
     51  // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
     64  if ((SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15) || [all...] |
X86InstrInfo.h | [all...] |
X86InstrCompiler.td |
    285  XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    299  XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS], [all...] |
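The X86MCCodeEmitter.cpp hits above concern instruction encoding: xmm8-xmm15 use the same 3-bit register field as xmm0-xmm7, and the fourth bit travels in the REX prefix (REX.R, REX.X or REX.B, depending on operand position). A hedged C sketch of that split, with illustrative names not taken from the LLVM sources:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint8_t rex_bit; uint8_t modrm_bits; } XmmEncoding;

    /* Split an xmm register number 0..15 into the REX extension bit and the
     * 3-bit field that goes into the ModRM/SIB byte. */
    static XmmEncoding encode_xmm(unsigned reg)
    {
        assert(reg < 16);
        XmmEncoding e;
        e.rex_bit    = (uint8_t)(reg >= 8);  /* xmm8..xmm15 set the extension bit */
        e.modrm_bits = (uint8_t)(reg & 7);   /* xmm8 shares the low bits of xmm0  */
        return e;
    }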
/external/llvm/test/MC/X86/ |
x86_64-avx-encoding.s |
      3  // CHECK: vaddss %xmm8, %xmm9, %xmm10
      5  vaddss %xmm8, %xmm9, %xmm10
      7  // CHECK: vmulss %xmm8, %xmm9, %xmm10
      9  vmulss %xmm8, %xmm9, %xmm10
     11  // CHECK: vsubss %xmm8, %xmm9, %xmm10
     13  vsubss %xmm8, %xmm9, %xmm10
     15  // CHECK: vdivss %xmm8, %xmm9, %xmm10
     17  vdivss %xmm8, %xmm9, %xmm10
     19  // CHECK: vaddsd %xmm8, %xmm9, %xmm10
     21  vaddsd %xmm8, %xmm9, %xmm1 [all...] |
/external/zlib/contrib/amd64/ |
amd64-match.S | 303 movdqu 48(%prev, %rdx), %xmm8 304 pcmpeqb %xmm8, %xmm7
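The zlib match loop above compares 16 input bytes at a time with movdqu/pcmpeqb, with the temporaries landing in xmm8 and friends. The same comparison written with SSE2 intrinsics, as a sketch (the helper name is made up; which xmm register the compiler picks is up to its register allocator):

    #include <emmintrin.h>

    /* Returns a 16-bit mask with bit i set where a[i] == b[i]. */
    static int equal_byte_mask(const unsigned char *a, const unsigned char *b)
    {
        __m128i va = _mm_loadu_si128((const __m128i *)a);   /* movdqu  */
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        __m128i eq = _mm_cmpeq_epi8(va, vb);                 /* pcmpeqb */
        return _mm_movemask_epi8(eq);
    }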
|
/external/valgrind/main/docs/internals/ |
register-uses.txt | 52 xmm8-15 n y (8-12)
|
/external/valgrind/main/none/tests/amd64/ |
redundantRexW.c |
    117  "\tmovupd 128(%%r14), %%xmm8\n" \
    135  "\tmovupd %%xmm8, 128(%%r14)\n" \
    146  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
|
/external/llvm/lib/Target/X86/MCTargetDesc/ |
X86MCTargetDesc.cpp | 163 case X86::XMM0: case X86::XMM8: 229 case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
|
/external/llvm/lib/Target/X86/Disassembler/ |
X86DisassemblerDecoder.h | 215 ENTRY(XMM8) \
|
/external/valgrind/main/memcheck/ |
mc_machine.c | 532 if (o >= GOF(XMM8) && o+sz <= GOF(XMM8) +SZB(XMM8)) return GOF(XMM8); [all...] |
/external/v8/src/x64/ |
assembler-x64.h | 206 "xmm8", 247 const XMMRegister xmm8 = { 8 }; member in namespace:v8::internal [all...] |
disasm-x64.cc | [all...] |
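In the v8 hits above, assembler-x64.h keeps a register-name table containing "xmm8" and defines the handle const XMMRegister xmm8 = { 8 }, i.e. a register object carries nothing more than its hardware code. A simplified C rendering of that idea (v8's real definition is C++ and richer; the name table and accessor below are illustrative):

    typedef struct { int code; } XMMRegister;   /* a handle is just the code */

    static const char *const xmm_names[16] = {
        "xmm0", "xmm1", "xmm2",  "xmm3",  "xmm4",  "xmm5",  "xmm6",  "xmm7",
        "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
    };

    static const XMMRegister xmm8 = { 8 };

    static const char *xmm_name(XMMRegister r) { return xmm_names[r.code]; }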
/external/llvm/docs/ |
TableGenFundamentals.html | 237 XMM6, XMM7, XMM8, XMM9, [all...] |