    Searched refs:rax (Results 1 - 25 of 162)

  /external/valgrind/main/none/tests/amd64/
loopnel.c 7 long rax = 5UL; local
8 asm volatile ("1: addq $1, %0; loopnel 1b" : "+a" (rax), "+c" (rcx) : : "cc");
9 printf ("%ld %ld\n", rax, rcx);
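
The loopnel.c test pins down the semantics of loopnel ("loop while not equal"): it decrements %rcx and branches while %rcx is nonzero and ZF is clear. A minimal compilable sketch of the same idea (assuming GCC/Clang inline-asm syntax; a reconstruction, not the original file):

    /* loopnel decrements %rcx, then loops while %rcx != 0 and ZF == 0. */
    #include <stdio.h>

    int main(void)
    {
        long rax = 5, rcx = 3;
        /* addq leaves ZF clear (result nonzero), so only rcx ends the loop. */
        asm volatile ("1: addq $1, %0; loopnel 1b"
                      : "+a" (rax), "+c" (rcx) : : "cc");
        printf("%ld %ld\n", rax, rcx);   /* prints "8 0": three iterations */
        return 0;
    }
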
asorep.c 10 unsigned long rdi, rsi, rcx, rax; local
29 rax = 0x751234560000ULL + (' ' << 8) + '0';
31 : "=D" (rdi), "=c" (rcx), "+a" (rax)
37 || rax != 0x751234560000ULL + (' ' << 8) + '0')
41 : "=S" (rsi), "=a" (rax)
44 || rax != 0x20302030ULL)
58 rax = 0x123450000ULL + ('d' << 8) + 'c';
60 : "=D" (rdi), "=c" (rcx), "+a" (rax)
64 || rax != 0x123450000ULL + ('d' << 8) + 'c')
67 rax = 0x543210000ULL + ('b' << 8) + 'a';
    [all...]
cmpxchg.c 11 ULong rax; variable
24 rdx = 0x11111111; rax = 0x22222222;
28 rax&0xff,rbx&0xff,rcx&0xff);
31 "\tpush %rax\n"
35 "\txor %rax, %rax\n" // get eflags in a known state
37 "\tmov " VG_SYM(rax) ",%rax\n"
42 "\tmov " VG_SYM(rax) "(%rip),%rax\n
    [all...]
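
cmpxchg.c saves and restores eflags around the instruction because cmpxchg reports its outcome there: it compares the accumulator with the destination, stores the source and sets ZF on a match, and otherwise loads the destination into the accumulator. A hedged C rendering of that contract (a sketch with illustrative names, not the valgrind test itself):

    #include <stdio.h>

    /* Returns the old value of *dst; *dst becomes desired only if it
     * equalled expected -- the accumulator rule of cmpxchg. */
    static unsigned long cas64(unsigned long *dst,
                               unsigned long expected,
                               unsigned long desired)
    {
        unsigned long prev = expected;            /* travels in %rax */
        asm volatile ("lock; cmpxchgq %2, %1"
                      : "+a" (prev), "+m" (*dst)
                      : "r" (desired)
                      : "cc", "memory");
        return prev;
    }

    int main(void)
    {
        unsigned long x = 0x22222222;
        cas64(&x, 0x22222222, 0x11111111);
        printf("%lx\n", x);                       /* prints 11111111 */
        return 0;
    }
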
  /external/llvm/test/MC/ELF/
x86_64-reloc-sizetest.s 6 L: movq $(L + 2147483648),%rax
relocation.s 12 leaq foo@GOTTPOFF(%rip), %rax # R_X86_64_GOTTPOFF
13 leaq foo@TLSGD(%rip), %rax # R_X86_64_TLSGD
14 leaq foo@TPOFF(%rax), %rax # R_X86_64_TPOFF32
16 leaq foo@dtpoff(%rax), %rcx # R_X86_64_DTPOFF32
20 addq $bar,%rax # R_X86_64_32S
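
relocation.s fixes which ELF relocation each @-modifier must produce. The TLS ones arise naturally from compiled code: under the general-dynamic model (gcc -fPIC), a __thread access is emitted roughly as a leaq of the symbol@TLSGD(%rip) plus a call to __tls_get_addr, relocated with R_X86_64_TLSGD. A hedged illustration (exact sequence varies by compiler and TLS model):

    /* Compile with gcc -fPIC -S and look for var@tlsgd(%rip) in the
     * assembly output. */
    __thread long var;

    long read_var(void) { return var; }
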
merge.s 14 movq foo@GOTPCREL, %rax
15 movq zed, %rax
  /external/llvm/test/MC/X86/
x86_errors.s 11 // 32: error: register %rax is only available in 64-bit mode
12 addl $0, 0(%rax)
19 movl 0(%rax), 0(%edx) // error: invalid operand for instruction
26 lea (%rsp, %rbp, $4), %rax
x86_64-imm-widths.s 59 // CHECK: addq $0, %rax
61 addq $0x0000000000000000, %rax
63 // CHECK: addq $127, %rax
65 addq $0x000000000000007F, %rax
67 // CHECK: addq $-128, %rax
69 addq $0xFFFFFFFFFFFFFF80, %rax
71 // CHECK: addq $-1, %rax
73 addq $0xFFFFFFFFFFFFFFFF, %rax
75 // CHECK: addq $0, %rax
77 addq $0x0000000000000000, %rax
    [all...]
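
x86_64-imm-widths.s checks that a 64-bit immediate surviving a truncate-then-sign-extend round trip is printed back in its short signed form: $0xFFFFFFFFFFFFFF80 as $-128, $0xFFFFFFFFFFFFFFFF as $-1. The underlying rule, as a small C check (function names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* A 64-bit immediate fits imm8/imm32 iff sign-extending the
     * truncated value reproduces it. */
    static int fits_simm8(uint64_t v)  { return (uint64_t)(int64_t)(int8_t)v  == v; }
    static int fits_simm32(uint64_t v) { return (uint64_t)(int64_t)(int32_t)v == v; }

    int main(void)
    {
        printf("%d\n", fits_simm8(0xFFFFFFFFFFFFFF80ULL));  /* 1: prints as $-128 */
        printf("%d\n", fits_simm8(0xFFFFFFFFFFFFFFFFULL));  /* 1: prints as $-1 */
        printf("%d\n", fits_simm32(0x0000000080000000ULL)); /* 0: needs movabs */
        return 0;
    }
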
x86_64-avx-clmul-encoding.s 7 // CHECK: vpclmulqdq $17, (%rax), %xmm10, %xmm13
9 vpclmulhqhqdq (%rax), %xmm10, %xmm13
15 // CHECK: vpclmulqdq $1, (%rax), %xmm10, %xmm13
17 vpclmulhqlqdq (%rax), %xmm10, %xmm13
23 // CHECK: vpclmulqdq $16, (%rax), %xmm10, %xmm13
25 vpclmullqhqdq (%rax), %xmm10, %xmm13
31 // CHECK: vpclmulqdq $0, (%rax), %xmm10, %xmm13
33 vpclmullqlqdq (%rax), %xmm10, %xmm13
39 // CHECK: vpclmulqdq $17, (%rax), %xmm10, %xmm13
41 vpclmulqdq $17, (%rax), %xmm10, %xmm13
    [all...]
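
The vpclmul{h,l}q{h,l}qdq alias mnemonics just bake in the immediate: one bit (bit 0) picks the high or low qword of one source, another (bit 4) of the other, which is why the CHECK lines show $0, $1, $16 and $17. A hedged intrinsic equivalent of the $17 case (SSE form; compile with -mpclmul):

    #include <wmmintrin.h>

    /* Carry-less multiply of the high qwords of a and b. */
    __m128i clmul_hi_hi(__m128i a, __m128i b)
    {
        return _mm_clmulepi64_si128(a, b, 0x11);  /* vpclmulqdq $17 */
    }
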
x86_64-xop-encoding.s 8 // CHECK: vphsubwd (%rcx,%rax), %xmm1
10 vphsubwd (%rcx,%rax), %xmm1
16 // CHECK: vphsubdq (%rcx,%rax), %xmm1
18 vphsubdq (%rcx,%rax), %xmm1
24 // CHECK: vphsubbw (%rax), %xmm1
26 vphsubbw (%rax), %xmm1
40 // CHECK: vphaddwd (%rdx,%rax), %xmm7
42 vphaddwd (%rdx,%rax), %xmm7
48 // CHECK: vphadduwq (%rcx,%rax), %xmm6
50 vphadduwq (%rcx,%rax), %xmm6
    [all...]
x86_64-bmi-encoding.s 11 // CHECK: blsmskl (%rax), %r10d
13 blsmskl (%rax), %r10d
15 // CHECK: blsmskq (%rax), %r10
17 blsmskq (%rax), %r10
27 // CHECK: blsil (%rax), %r10d
29 blsil (%rax), %r10d
31 // CHECK: blsiq (%rax), %r10
33 blsiq (%rax), %r10
43 // CHECK: blsrl (%rax), %r10d
45 blsrl (%rax), %r10d
    [all...]
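
The BMI1 instructions exercised here compute the classic bit-manipulation identities; a hedged C reference model (not the encoding test itself):

    #include <stdint.h>

    uint64_t blsi(uint64_t x)   { return x & (0 - x); }  /* isolate lowest set bit */
    uint64_t blsr(uint64_t x)   { return x & (x - 1); }  /* clear lowest set bit */
    uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); }  /* mask up to lowest set bit */

With -mbmi, gcc and clang also expose these as _blsi_u64, _blsr_u64, and _blsmsk_u64 in <immintrin.h>.
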
x86_64-encoding.s 9 // CHECK: movq %gs:(%rdi), %rax
11 movq %gs:(%rdi), %rax
65 // CHECK: crc32b %dil, %rax
67 crc32b %dil,%rax
69 // CHECK: crc32b %r11b, %rax
71 crc32b %r11b,%rax
73 // CHECK: crc32b 4(%rbx), %rax
75 crc32b 4(%rbx), %rax
77 // CHECK: crc32q %rbx, %rax
79 crc32q %rbx, %rax
    [all...]
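
The crc32b/crc32q forms accumulate a CRC-32C; the 64-bit destination variants in the test simply zero-extend the 32-bit result. A hedged intrinsic version (SSE4.2, compile with -msse4.2; the seed and final inversion follow the usual CRC-32C convention):

    #include <nmmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    uint32_t crc32c(const uint8_t *p, size_t n)
    {
        uint32_t c = ~0u;
        for (size_t i = 0; i < n; i++)
            c = _mm_crc32_u8(c, p[i]);   /* compiles to crc32b */
        return ~c;
    }
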
x86_64-fma3-encoding.s 7 // CHECK: vfmadd132pd (%rax), %xmm10, %xmm11
9 vfmadd132pd (%rax), %xmm10, %xmm11
15 // CHECK: vfmadd132ps (%rax), %xmm10, %xmm11
17 vfmadd132ps (%rax), %xmm10, %xmm11
23 // CHECK: vfmadd213pd (%rax), %xmm10, %xmm11
25 vfmadd213pd (%rax), %xmm10, %xmm11
31 // CHECK: vfmadd213ps (%rax), %xmm10, %xmm11
33 vfmadd213ps (%rax), %xmm10, %xmm11
39 // CHECK: vfmadd231pd (%rax), %xmm10, %xmm11
41 vfmadd231pd (%rax), %xmm10, %xmm11
    [all...]
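
The 132/213/231 suffixes name the operand roles (dst = 1, src2 = 2, src3 = 3): vfmadd132pd computes dst = dst*src3 + src2, vfmadd213pd computes dst = src2*dst + src3, and vfmadd231pd computes dst = src2*src3 + dst. One intrinsic covers all three; the compiler picks the form whose overwritten operand is dead. A hedged example (compile with -mfma):

    #include <immintrin.h>

    __m128d fma_pd(__m128d a, __m128d b, __m128d c)
    {
        return _mm_fmadd_pd(a, b, c);   /* a*b + c, via any vfmadd form */
    }
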
intel-syntax-encoding.s 8 xor rax, 12
15 or rax, 12
22 cmp rax, 12
25 mov QWORD PTR [RSP - 16], RAX
32 add rax, -12
x86_64-avx-encoding.s 311 // CHECK: vcmpps $0, (%rax), %xmm12, %xmm15
313 vcmpps $0, (%rax), %xmm12, %xmm15
323 // CHECK: vcmppd $0, (%rax), %xmm12, %xmm15
325 vcmppd $0, (%rax), %xmm12, %xmm15
    [all...]
x86-64.s 8 monitor %rax, %rcx, %rdx
14 mwait %rax, %rcx
24 // CHECK: orq %rax, %rdx
25 or %rax, %rdx
26 // CHECK: shlq $3, %rax
27 shl $3, %rax
69 // CHECK: xorq $1, %rax
70 xorq $1, %rax
73 // CHECK: xorq $256, %rax
74 xorq $256, %rax
    [all...]
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-linux.S 114 movq $__NR_rt_sigprocmask, %rax // syscall #
123 testq %rax, %rax
130 movq %rsi, %rax /* rax --> VexGuestAMD64State * */
132 movq OFFSET_amd64_RDI(%rax), %rdi
133 movq OFFSET_amd64_RSI(%rax), %rsi
134 movq OFFSET_amd64_RDX(%rax), %rdx
135 movq OFFSET_amd64_R10(%rax), %r10
136 movq OFFSET_amd64_R8(%rax), %r8
    [all...]
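
The wrapper above copies the guest's argument registers into the real ones before issuing syscall, i.e. it reproduces the standard Linux x86-64 convention: number in %rax, arguments in %rdi, %rsi, %rdx, %r10, %r8, %r9, result back in %rax. A hedged three-argument sketch (illustrative name, not valgrind's wrapper):

    static long raw_syscall3(long nr, long a1, long a2, long a3)
    {
        long ret;
        asm volatile ("syscall"
                      : "=a" (ret)
                      : "a" (nr), "D" (a1), "S" (a2), "d" (a3)
                      : "rcx", "r11", "memory");  /* syscall clobbers rcx/r11 */
        return ret;
    }

    int main(void)
    {
        raw_syscall3(1 /* __NR_write */, 1, (long) "hi\n", 3);
        return 0;
    }
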
  /bionic/libm/amd64/
s_lrint.S 35 cvtsd2si %xmm0, %rax
s_lrintf.S 35 cvtss2si %xmm0, %rax
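
Each of these bionic files is essentially a single instruction: cvtsd2si (and cvtss2si for float) converts using the current MXCSR rounding mode, which is precisely lrint()'s contract. A hedged C rendering (illustrative name; the real file is bare assembly):

    long my_lrint(double x)
    {
        long r;
        asm ("cvtsd2si %1, %0" : "=r" (r) : "x" (x));
        return r;
    }
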
  /external/valgrind/main/coregrind/m_mach/
mach_traps-amd64-darwin.S 39 movq $__NR_task_self_trap, %rax
50 // movq $__NR_host_self_trap, %rax
60 movq $__NR_thread_self_trap, %rax
70 movq $__NR_mach_msg_trap, %rax
80 movq $__NR_mach_reply_port, %rax
90 movq $__NR_swtch_pri, %rax
100 movq $__NR_semaphore_wait_trap, %rax
110 movq $__NR_semaphore_signal_trap, %rax
120 movq $__NR_semaphore_signal_thread_trap, %rax
130 movq $__NR_semaphore_wait_signal_trap, %rax
    [all...]
  /external/valgrind/main/exp-bbv/tests/amd64-linux/
million.S 8 xor %rax,%rax # not needed, pads total to 1M
20 mov $60,%rax # put exit syscall number (60) in rax
  /external/openssl/crypto/bn/asm/
modexp512-x86_64.pl 83 # uses rax, rdx, and args
89 mov (+8*0)($SRC2), %rax
90 mul $OP # rdx:rax = %OP * [0]
92 add %rax, $X[0]
100 mov (+8*$i)($SRC2), %rax
101 mul $OP # rdx:rax = %OP * [$i]
103 add %rax, $X[$i]
116 # uses rax, rdx, and args
122 mov (+8*0)($SRC2), %rax
123 mul $OP # rdx:rax = %OP * [0]
    [all...]
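
The loop body's pattern relies on one-operand mul leaving the full 128-bit product in rdx:rax, which is then added into the running limb with the carry propagated by adc. A hedged C model of that multiply-accumulate chain using the GCC/Clang __int128 extension (illustrative name and signature):

    #include <stdint.h>

    /* x[0..n-1] += op * src2[0..n-1]; returns the carry-out limb.
     * The sum op*src2[i] + x[i] + carry cannot overflow 128 bits. */
    uint64_t muladd(uint64_t *x, const uint64_t *src2, uint64_t op, int n)
    {
        uint64_t carry = 0;
        for (int i = 0; i < n; i++) {
            unsigned __int128 p = (unsigned __int128) op * src2[i]  /* rdx:rax */
                                + x[i] + carry;
            x[i]  = (uint64_t) p;          /* add %rax, X[i] */
            carry = (uint64_t) (p >> 64);  /* carry via adc %rdx */
        }
        return carry;
    }
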
x86_64-mont5.pl 83 mov %rsp,%rax
89 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
101 lea .Lmagic_masks(%rip),%rax
104 movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
105 movq 8(%rax,%r10,8),%xmm5 # cache line contains element
106 movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
107 movq 24(%rax,%r10,8),%xmm7
125 mov ($ap),%rax
138 mov %rax,$lo0
139 mov ($np),%rax
    [all...]
x86_64-mont.pl 99 mov ($ap),%rax
106 mov %rax,$lo0
107 mov ($np),%rax
113 add %rax,$lo0 # discarded
114 mov 8($ap),%rax
123 add %rax,$hi1
124 mov ($ap,$j,8),%rax
134 add %rax,$hi0
135 mov ($np,$j,8),%rax
144 add %rax,$hi1
    [all...]
  /external/libvpx/vp8/common/x86/
recon_mmx.asm 26 movsxd rax, dword ptr arg(3) ;stride
39 movd [rdi+rax], mm2
45 movd [rdi+2*rax], mm3
47 add rdi, rax
52 movd [rdi+2*rax], mm4
80 movsxd rax, dword ptr arg(1) ;src_stride;
83 movq mm1, [rsi+rax]
84 movq mm2, [rsi+rax*2]
87 lea rsi, [rsi+rax*2]
90 add rsi, rax
    [all...]
