/external/valgrind/main/none/tests/amd64/
loopnel.c
    6   long rcx = 0x200000005UL;
    8   asm volatile ("1: addq $1, %0; loopnel 1b" : "+a" (rax), "+c" (rcx) : : "cc");
    9   printf ("%ld %ld\n", rax, rcx);
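
For context: loopne decrements the count register and branches while it is non-zero and ZF is clear. A minimal standalone sketch of that behaviour (an illustration, not the Valgrind test; it uses plain loopne rather than the address-size-suffixed loopnel, and assumes GCC inline asm):

    #include <stdio.h>

    int main(void)
    {
        unsigned long rax = 0, rcx = 5;
        /* addq never yields zero here, so ZF stays clear and loopne runs
         * until RCX counts down to 0: five iterations in total. */
        __asm__ volatile ("1: addq $1, %0; loopne 1b"
                          : "+a" (rax), "+c" (rcx)
                          :
                          : "cc");
        printf("%lu %lu\n", rax, rcx);   /* prints: 5 0 */
        return 0;
    }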
pcmpxstrx64.stdout.exp
    3   istri $0x4A: xmm0 55555555555555555555555555555555 rcx 5555555555550006 flags 00000881
    4   istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
    5   istrm $0x4A: xmm0 000000000000000000ffffffffffffff rcx 5555555555555555 flags 00000881
    6   istrm $0x0A: xmm0 0000000000000000000000000000007f rcx 5555555555555555 flags 00000881
    7   estri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000008c1
    8   estri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
    9   estrm $0x4A: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
    10  estrm $0x0A: xmm0 0000000000000000000000000000ffff rcx 5555555555555555 flags 000008c1
    13  istri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000000c1
    14  istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c [all...]
pcmpxstrx64w.stdout.exp
    3   istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550002 flags 00000881
    4   istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
    5   istrm $0x4B: xmm0 00000000000000000000ffffffffffff rcx 5555555555555555 flags 00000881
    6   istrm $0x0B: xmm0 00000000000000000000000000000007 rcx 5555555555555555 flags 00000881
    7   estri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000008c1
    8   estri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
    9   estrm $0x4B: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
    10  estrm $0x0B: xmm0 000000000000000000000000000000ff rcx 5555555555555555 flags 000008c1
    13  istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
    14  istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550003 flags 000000c [all...]
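
Both expected-output files record the RCX index and flags produced by the SSE4.2 string instructions (pcmpistri/pcmpistrm/pcmpestri/pcmpestrm) for a handful of immediates. As a rough, hedged illustration of the instruction family rather than of the exact 0x4A/0x0A/0x4B/0x0B immediates being tested, the sketch below uses the equal-any mode to find the first byte of a string that belongs to a character set (assumes SSE4.2 support, e.g. -msse4.2):

    #include <nmmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        /* pcmpistri returns its result index in ECX; the intrinsic exposes
         * it as the return value. Set = first operand, text = second. */
        __m128i set  = _mm_loadu_si128((const __m128i *)"aeiou\0\0\0\0\0\0\0\0\0\0\0");
        __m128i text = _mm_loadu_si128((const __m128i *)"xyzzy hello wrld");
        int idx = _mm_cmpistri(set, text,
                               _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                               _SIDD_LEAST_SIGNIFICANT);
        printf("first byte from the set at index %d\n", idx);   /* 7: the 'e' */
        return 0;
    }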
asorep.c
    10  unsigned long rdi, rsi, rcx, rax;
    20  : "=D" (rdi), "=S" (rsi), "=c" (rcx)
    26  || rcx)
    31  : "=D" (rdi), "=c" (rcx), "+a" (rax)
    36  || rcx
    50  : "=D" (rdi), "=S" (rsi), "=c" (rcx)
    54  || rcx != 17ULL)
    60  : "=D" (rdi), "=c" (rcx), "+a" (rax)
    63  || rcx != 23ULL
    69  : "=D" (rdi), "=c" (rcx), "+a" (rax [all...]
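
asorep.c runs rep-prefixed string instructions with a 0x67 address-size override and then checks RDI, RSI, RCX and the copied bytes. A hedged sketch of the underlying, non-overridden behaviour those checks build on (illustration only; assumes GCC inline asm and the ABI's cleared direction flag):

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        char src[16] = "0123456789abcde", dst[16];
        unsigned long rdi, rsi, rcx;
        /* After "rep movsb": RCX is 0, RDI/RSI have advanced by the count. */
        __asm__ volatile ("rep movsb"
                          : "=D" (rdi), "=S" (rsi), "=c" (rcx)
                          : "0" (dst), "1" (src), "2" (sizeof src)
                          : "memory");
        assert(rcx == 0);
        assert(rdi == (unsigned long)dst + sizeof dst);
        assert(rsi == (unsigned long)src + sizeof src);
        assert(memcmp(src, dst, sizeof src) == 0);
        return 0;
    }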
cmpxchg.c
    13  ULong rcx;
    25  rcx = 0x33333333; rbx = 0x44444444;
    28  rax&0xff,rbx&0xff,rcx&0xff);
    33  "\tpush %rcx\n"
    39  "\tmov " VG_SYM(rcx) ",%rcx\n"
    44  "\tmov " VG_SYM(rcx) "(%rip),%rcx\n"
    51  "\tmov %rcx," VG_SYM(rcx_out) "\n"
    55  "\tmov %rcx," VG_SYM(rcx_out) "(%rip)\n [all...]
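
cmpxchg.c loads fixed values into register images, runs cmpxchg at several widths, and prints the results. As a hedged reminder of the core semantics (not the test's harness): cmpxchg compares the accumulator with the destination, stores the source and sets ZF on a match, and otherwise loads the accumulator with the old value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long mem = 0x11111111UL;
        unsigned long rax = 0x11111111UL;    /* expected old value */
        unsigned long rbx = 0x44444444UL;    /* new value to store */
        unsigned char zf;
        __asm__ volatile ("lock cmpxchgq %3, %0; setz %1"
                          : "+m" (mem), "=q" (zf), "+a" (rax)
                          : "r" (rbx)
                          : "cc");
        printf("mem=%#lx rax=%#lx zf=%u\n", mem, rax, zf);  /* mem=0x44444444, zf=1 */
        return 0;
    }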
/external/llvm/test/MC/X86/ |
x86_64-fma4-encoding.s
    4   // CHECK: vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
    6   vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
    8   // CHECK: vfmaddss %xmm1, (%rcx), %xmm0, %xmm0
    10  vfmaddss %xmm1, (%rcx),%xmm0, %xmm0
    16  // CHECK: vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0
    18  vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0
    20  // CHECK: vfmaddsd %xmm1, (%rcx), %xmm0, %xmm0
    22  vfmaddsd %xmm1, (%rcx),%xmm0, %xmm0
    32  // CHECK: vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
    34  vfmaddps (%rcx), %xmm1, %xmm [all...]
x86_64-xop-encoding.s
    8   // CHECK: vphsubwd (%rcx,%rax), %xmm1
    10  vphsubwd (%rcx,%rax), %xmm1
    16  // CHECK: vphsubdq (%rcx,%rax), %xmm1
    18  vphsubdq (%rcx,%rax), %xmm1
    32  // CHECK: vphaddwq (%rcx), %xmm4
    34  vphaddwq (%rcx), %xmm4
    48  // CHECK: vphadduwq (%rcx,%rax), %xmm6
    50  vphadduwq (%rcx,%rax), %xmm6
    64  // CHECK: vphaddudq 8(%rcx,%rax), %xmm4
    66  vphaddudq 8(%rcx,%rax), %xmm [all...]
x86_64-avx-encoding.s
    35  // CHECK: vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11
    37  vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11
    39  // CHECK: vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11
    41  vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11
    43  // CHECK: vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11
    45  vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11
    47  // CHECK: vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11
    49  vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11
    51  // CHECK: vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11
    53  vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm1 [all...]
/external/llvm/test/MC/MachO/ |
x86_64-reloc-arithmetic.s
    8   leaq _bar(%rip), %rcx
/external/valgrind/main/coregrind/m_mach/ |
mach_traps-amd64-darwin.S
    40   movq %rcx, %r10
    51   // movq %rcx, %r10
    61   movq %rcx, %r10
    71   movq %rcx, %r10
    81   movq %rcx, %r10
    91   movq %rcx, %r10
    101  movq %rcx, %r10
    111  movq %rcx, %r10
    121  movq %rcx, %r10
    131  movq %rcx, %r1 [all...]
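
The recurring movq %rcx, %r10 is the usual x86-64 syscall shuffle: the syscall instruction overwrites RCX with the return RIP, so the kernel takes the fourth argument in R10 instead. A hedged sketch of the same shuffle as a raw wrapper (Linux-flavoured, while the file above targets Darwin; raw_syscall4 is an illustrative name):

    #include <stdio.h>

    static long raw_syscall4(long nr, long a1, long a2, long a3, long a4)
    {
        register long r10 __asm__("r10") = a4;   /* the rcx -> r10 move */
        long ret = nr;                           /* syscall number in rax */
        __asm__ volatile ("syscall"
                          : "+a" (ret)
                          : "D" (a1), "S" (a2), "d" (a3), "r" (r10)
                          : "rcx", "r11", "memory");
        return ret;
    }

    int main(void)
    {
        /* write(1, "hi\n", 3) on Linux; the fourth argument is unused here. */
        long n = raw_syscall4(1, 1, (long)"hi\n", 3, 0);
        printf("write returned %ld\n", n);
        return 0;
    }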
/external/valgrind/main/exp-bbv/tests/amd64-linux/ |
million.S
    7   xor %rcx,%rcx # not needed, pads total to 1M
    10  mov $499997,%rcx # load counter
    12  dec %rcx # repeat count times
clone_test.S
    13  mov $499,%rcx # load counter
    15  dec %rcx # repeat count times
    61  mov $499997,%rcx # load counter
    63  dec %rcx # repeat count times
    70  mov $999997,%rcx # load counter
    72  dec %rcx # repeat count times
    85  mov $250000,%rcx # load counter
    87  dec %rcx # repeat count times
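
million.S and clone_test.S share one idiom: load a count into %rcx, then spin on a two-instruction dec/jnz loop so the program retires an exact number of instructions for the BBV tests. A hedged C rendering of that idiom (an illustration, not the test files):

    int main(void)
    {
        unsigned long rcx = 499997;   /* counter value used by million.S */
        /* dec + jnz is two instructions per iteration, so the counter is
         * picked to make the total instruction count come out exactly. */
        __asm__ volatile ("1: dec %0; jnz 1b" : "+c" (rcx) : : "cc");
        return (int)rcx;              /* 0 after the loop */
    }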
/external/v8/test/cctest/ |
test-disasm-x64.cc
    72   __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
    83   __ addq(rdi, Operand(rbp, rcx, times_4, 0));
    84   __ addq(rdi, Operand(rbp, rcx, times_4, 12));
    85   __ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
    95   __ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
    96   __ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
    97   __ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
    111  __ movsxbq(rdx, Operand(rcx, 0));
    112  __ movsxwq(rdx, Operand(rcx, 0))
    122  __ bts(Operand(rdx, 0), rcx);
    123  __ bts(Operand(rbx, rcx, times_4, 0), rcx); [all...]
test-macro-assembler-x64.cc
    75   using v8::internal::rcx;
    86   // in RSI, RDI, RDX, RCX, R8, and R9, and floating point arguments in
    144  __ Move(rcx, Smi::FromInt(0));
    146  __ cmpq(rcx, rdx);
    196  __ Move(rcx, Smi::FromInt(x));
    197  __ movq(r8, rcx);
    200  __ SmiCompare(rcx, rdx);
    213  __ cmpq(rcx, r8);
    220  __ SmiCompare(rdx, rcx);
    230  __ cmpq(rcx, rcx) [all...]
/external/openssl/crypto/rc4/asm/ |
rc4-x86_64.S
    17  movq %rcx,%r13
    19  xorq %rcx,%rcx
    42  movl (%rdi,%rcx,4),%edx
    43  movl %eax,(%rdi,%rcx,4)
    60  movl (%rdi,%rcx,4),%edx
    61  movl %eax,(%rdi,%rcx,4)
    68  movl (%rdi,%rcx,4),%edx
    69  movl %ebx,(%rdi,%rcx,4)
    76  movl (%rdi,%rcx,4),%ed [all...]
rc4-md5-x86_64.S
    17   movq %rcx,%r11
    23   xorq %rcx,%rcx
    53   movl (%rdi,%rcx,4),%edx
    55   movl %eax,(%rdi,%rcx,4)
    72   movl (%rdi,%rcx,4),%edx
    74   movl %ebx,(%rdi,%rcx,4)
    90   movl (%rdi,%rcx,4),%edx
    92   movl %eax,(%rdi,%rcx,4)
    108  movl (%rdi,%rcx,4),%ed [all...]
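
In both RC4 files, RCX holds the current state index and the cipher state lives at RDI with one byte per 32-bit slot, hence the scaled movl (%rdi,%rcx,4) loads and stores. A hedged scalar sketch of the RC4 PRGA step those accesses implement (rc4_prga is an illustrative name, not an OpenSSL entry point):

    #include <stdint.h>
    #include <stddef.h>

    static void rc4_prga(uint32_t s[256], uint8_t *x, uint8_t *y,
                         const uint8_t *in, uint8_t *out, size_t len)
    {
        uint8_t i = *x, j = *y;
        for (size_t n = 0; n < len; n++) {
            i = (uint8_t)(i + 1);
            uint32_t ti = s[i];              /* movl (%rdi,%rcx,4),%edx style load */
            j = (uint8_t)(j + ti);
            uint32_t tj = s[j];
            s[i] = tj;                       /* swap S[i] and S[j] */
            s[j] = ti;
            out[n] = in[n] ^ (uint8_t)s[(uint8_t)(ti + tj)];
        }
        *x = i;
        *y = j;
    }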
/external/libvpx/libvpx/vp8/common/x86/ |
recon_mmx.asm
    39  movsxd rcx, dword ptr arg(3) ;dst_stride
    45  movq [rdi+rcx], mm1
    46  movq [rdi+rcx*2], mm2
    49  lea rdi, [rdi+rcx*2]
    52  add rdi, rcx
    59  movq [rdi+rcx], mm4
    61  movq [rdi+rcx*2], mm5
    62  lea rdi, [rdi+rcx*2]
    67  movq [rdi+rcx], mm0
    68  movq [rdi+rcx*2],mm [all...]
postproc_mmx.asm
    61   mov rcx, 8
    66   dec rcx
    74   mov rcx, 8
    79   dec rcx
    93   mov rcx, 15 ;
    111  dec rcx
    182  mov rcx, rdx
    184  and rcx, 127
    188  movq mm4, [rax + rcx*2] ;vp8_rv[rcx*2 [all...]
/external/openssl/crypto/bn/asm/ |
x86_64-gf2m.S
    23  movq %rsi,%rcx
    26  shrq $2,%rcx
    30  xorq %rcx,%rdx
    78  movq (%rsp,%rdi,8),%rcx
    80  movq %rcx,%rbx
    81  shlq $4,%rcx
    85  xorq %rcx,%rax
    93  movq (%rsp,%rdi,8),%rcx
    95  movq %rcx,%rbx
    96  shlq $12,%rcx [all...]
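
x86_64-gf2m.S multiplies polynomials over GF(2); the shifted copies of RCX (shlq $4, shlq $12, ...) combine precomputed table entries into the product. As a hedged reference point for the arithmetic only, a naive shift-and-xor carry-less multiply of two 64-bit polynomials (assumes a compiler with __int128; not OpenSSL's table-driven code):

    #include <stdint.h>

    static void gf2m_mul_1x1(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
    {
        unsigned __int128 acc = 0;
        for (int i = 0; i < 64; i++)
            if ((b >> i) & 1)
                acc ^= (unsigned __int128)a << i;   /* carry-less add is XOR */
        *hi = (uint64_t)(acc >> 64);
        *lo = (uint64_t)acc;
    }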
x86_64-mont.S
    44   movq (%rcx),%rax
    72   movq (%rcx,%r15,8),%rax
    106  movq (%rcx),%rax
    136  movq (%rcx,%r15,8),%rax
    174  .Lsub: sbbq (%rcx,%r14,8),%rax
    185  movq %rdi,%rcx
    186  andq %rax,%rcx
    188  orq %rcx,%rsi
    242  movq (%rcx),%rax
    255  movq 8(%rcx),%ra [all...]
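
x86_64-mont.S (bn_mul_mont) walks the modulus through (%rcx,%r15,8) while accumulating a Montgomery product, ending with the conditional subtraction of the modulus visible at .Lsub. A hedged single-word illustration of the reduction step (the real routine is the multi-word version; mont_redc is an illustrative name and __int128 support is assumed):

    #include <stdint.h>

    /* Reduce t (t < n * 2^64) modulo n, given n0 = -n^-1 mod 2^64. */
    static uint64_t mont_redc(unsigned __int128 t, uint64_t n, uint64_t n0)
    {
        uint64_t m = (uint64_t)t * n0;                 /* m = t * (-n^-1) mod 2^64 */
        unsigned __int128 u = t + (unsigned __int128)m * n;
        int carry = u < t;                             /* the sum can exceed 128 bits */
        uint64_t r = (uint64_t)(u >> 64);              /* exact division by 2^64 */
        if (carry || r >= n)
            r -= n;                                    /* final conditional subtract */
        return r;
    }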
/external/openssl/crypto/aes/asm/ |
aesni-x86_64.S
    46  movups (%rcx),%xmm0
    48  movups 16(%rcx),%xmm1
    49  leaq 32(%rcx),%rcx
    53  movups (%rcx),%xmm0
    60  movups 16(%rcx),%xmm1
    63  leaq 32(%rcx),%rcx
    65  movups (%rcx),%xmm0
    79  movups (%rcx),%xmm [all...]
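
In aesni-x86_64.S, RCX points at the expanded key schedule: the loop loads round keys at (%rcx) and 16(%rcx) and advances with leaq 32(%rcx),%rcx. A hedged intrinsics rendering of that round-key walk for one AES-128 block (assumes a caller-supplied 11-entry expanded schedule and AES-NI support, e.g. -maes):

    #include <wmmintrin.h>

    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11])
    {
        block = _mm_xor_si128(block, rk[0]);          /* initial whitening */
        for (int i = 1; i < 10; i++)
            block = _mm_aesenc_si128(block, rk[i]);   /* rounds 1..9 */
        return _mm_aesenclast_si128(block, rk[10]);   /* final round */
    }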
/external/libvpx/libvpx/vp8/encoder/x86/ |
quantize_ssse3.asm
    42   mov rdi, rcx ; BLOCK *b
    51   mov rcx, [rdi + vp8_block_round]
    59   movdqa xmm2, [rcx]
    60   movdqa xmm3, [rcx + 16]
    81   mov rcx, [rsi + vp8_blockd_dqcoeff]
    108  movdqa [rcx], xmm0 ;store dqcoeff
    109  movdqa [rcx + 16], xmm4 ;store dqcoeff
    110  mov rcx, [rsi + vp8_blockd_eob]
    118  mov BYTE PTR [rcx], al ;store eob
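
quantize_ssse3.asm pulls the block's round and quant vectors through RCX, quantizes the 16 coefficients, then stores dqcoeff and the eob byte through RCX again. A hedged scalar sketch of that flow; the rounding and shift below are illustrative rather than VP8's exact fast-quantize arithmetic, and zig-zag coefficient order is ignored:

    #include <stdint.h>
    #include <stdlib.h>

    static int quantize_block(const int16_t coeff[16], const int16_t round[16],
                              const int16_t quant[16], const int16_t dequant[16],
                              int16_t qcoeff[16], int16_t dqcoeff[16])
    {
        int eob = 0;
        for (int i = 0; i < 16; i++) {
            int z  = coeff[i];
            int sz = (z < 0) ? -1 : 0;                   /* sign of the coefficient */
            int x  = ((abs(z) + round[i]) * quant[i]) >> 16;
            x = (x ^ sz) - sz;                           /* reapply the sign */
            qcoeff[i]  = (int16_t)x;
            dqcoeff[i] = (int16_t)(x * dequant[i]);      /* the "store dqcoeff" step */
            if (x)
                eob = i + 1;                             /* the "store eob" byte */
        }
        return eob;
    }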
subtract_mmx.asm
    31  movsxd rcx, dword ptr arg(4);pitch
    43  movd mm1, [rax+rcx]
    47  movq [rdi+rcx*2],mm0
    51  movd mm1, [rax+rcx*2]
    55  movq [rdi+rcx*4], mm0
    58  lea rcx, [rcx+rcx*2]
    63  movd mm1, [rax+rcx]
    67  movq [rdi+rcx*2], mm [all...]
/external/compiler-rt/lib/tsan/rtl/ |
tsan_rtl_amd64.S
    11   push %rcx
    13   .cfi_rel_offset %rcx, 0
    66   pop %rcx
    72   .cfi_restore %rcx
    91   push %rcx
    93   .cfi_rel_offset %rcx, 0
    146  pop %rcx
    152  .cfi_restore %rcx
/external/v8/src/x64/ |
builtins-x64.cc
    135   __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
    136   __ decb(FieldOperand(rcx,
    168   __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
    169   __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
    170   __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
    175   __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
    188   __ InitializeFieldsWithFiller(rcx, rsi, rdx);
    191   __ InitializeFieldsWithFiller(rcx, rdi, rdx);
    209   __ movzxbq(rcx,
    211   __ addq(rdx, rcx);
    239   __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
    1499  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
    1503  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
    1504  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx); [all...]