    Searched refs: movq (Results 1 - 25 of 159)


  /external/llvm/test/MC/ELF/
x86_64-reloc-sizetest.s 6 L: movq $(L + 2147483648),%rax
merge.s 14 movq foo@GOTPCREL, %rax
15 movq zed, %rax
relocation.s 7 movq $bar, %rdx # R_X86_64_32S
8 movq $bar, bar(%rip) # R_X86_64_32S
10 movq bar, %rdx # R_X86_64_32S
18 movq foo(%rip), %rdx
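
These relocation tests pin down how the assembler encodes a movq whose operand is a symbol: a symbol used as an absolute immediate or absolute memory address is emitted with R_X86_64_32S (sign-extended 32-bit), a (%rip)-relative operand gets a PC-relative relocation, and @GOTPCREL routes the access through the GOT. A minimal sketch of the three forms (AT&T syntax, GNU as; foo and bar stand for external symbols):

    movq $bar, %rdx                # absolute address as immediate -> R_X86_64_32S
    movq bar, %rdx                 # absolute memory operand       -> R_X86_64_32S
    movq foo(%rip), %rdx           # PC-relative load              -> R_X86_64_PC32
    movq foo@GOTPCREL(%rip), %rax  # address loaded from the GOT   -> R_X86_64_GOTPCREL
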
  /external/valgrind/main/coregrind/m_mach/
mach_traps-amd64-darwin.S 39 movq $__NR_task_self_trap, %rax
40 movq %rcx, %r10
50 // movq $__NR_host_self_trap, %rax
51 // movq %rcx, %r10
60 movq $__NR_thread_self_trap, %rax
61 movq %rcx, %r10
70 movq $__NR_mach_msg_trap, %rax
71 movq %rcx, %r10
80 movq $__NR_mach_reply_port, %rax
81 movq %rcx, %r1
    …
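
Every stub in this file repeats the same two moves: the Mach trap number goes into %rax, and the fourth argument moves from %rcx into %r10, because the syscall instruction itself clobbers %rcx (and %r11), so the kernel takes argument four in %r10. A minimal sketch of the pattern, with a placeholder trap number:

    # generic Darwin trap stub (TRAP_NUMBER is a placeholder)
    movq $TRAP_NUMBER, %rax   # which trap to invoke
    movq %rcx, %r10           # arg 4: syscall clobbers %rcx, kernel reads %r10
    syscall
    ret
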
  /external/openssl/crypto/bn/asm/
x86_64-mont5.S 23 movq %rsp,%rax
29 movq %rax,8(%rsp,%r9,8)
31 movq %rdx,%r12
32 movq %r10,%r11
39 movq 0(%rax,%r10,8),%xmm4
40 movq 8(%rax,%r10,8),%xmm5
41 movq 16(%rax,%r10,8),%xmm6
42 movq 24(%rax,%r10,8),%xmm7
44 movq -96(%r12),%xmm0
45 movq -32(%r12),%xmm
    …
x86_64-gf2m.S 7 movq $-1,%r9
19 movq %rax,%rdx
23 movq %rsi,%rcx
28 movq %rdi,%rbx
35 movq %r9,%r13
36 movq $0,0(%rsp)
38 movq %r9,8(%rsp)
39 movq %r11,%r14
40 movq %r10,16(%rsp)
42 movq %r13,24(%rsp
    …
x86_64-mont.S 26 movq %rsp,%r11
31 movq %r11,8(%rsp,%r9,8)
33 movq %rdx,%r12
34 movq (%r8),%r8
35 movq (%r12),%rbx
36 movq (%rsi),%rax
41 movq %r8,%rbp
43 movq %rax,%r10
44 movq (%rcx),%rax
47 movq %rdx,%r1
    …
modexp512-x86_64.S 6 movq 0(%rsi),%rax
10 movq %r8,0(%rcx)
11 movq %rdx,%rbx
13 movq 8(%rsi),%rax
19 movq %rdx,%rbx
21 movq 16(%rsi),%rax
27 movq %rdx,%rbx
29 movq 24(%rsi),%rax
35 movq %rdx,%rbx
37 movq 32(%rsi),%ra
    …
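
Across these bignum routines, movq mostly shuttles 64-bit limbs to and from the registers surrounding mulq, whose 128-bit product arrives in %rdx:%rax; the recurring store-the-low-half, carry-the-high-half shape visible in modexp512 is the core step of schoolbook multiplication. A minimal sketch of one limb step under that convention (register roles are illustrative):

    # one 64x64 -> 128 multiply-accumulate step: r[0] = a[0]*b + carry
    movq 0(%rsi), %rax   # load limb a[0]
    mulq %rbx            # %rdx:%rax = a[0] * b
    addq %r8, %rax       # fold in the incoming carry
    adcq $0, %rdx        # ripple into the high half
    movq %rax, 0(%rcx)   # store the low 64 bits
    movq %rdx, %r8       # the high half becomes the next carry
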
  /external/libvpx/libvpx/vp8/common/x86/
recon_mmx.asm 31 movq mm0, [rsi]
36 movq mm1, [rsi+rax]
37 movq mm2, [rsi+rax*2]
42 movq [rdi], mm0
45 movq [rdi+rcx], mm1
46 movq [rdi+rcx*2], mm2
50 movq mm3, [rsi]
53 movq mm4, [rsi+rax]
55 movq mm5, [rsi+rax*2]
56 movq [rdi], mm
    …
dequantize_mmx.asm 29 movq mm1, [rsi]
31 movq [rdi], mm1
33 movq mm1, [rsi+8]
35 movq [rdi+8], mm1
37 movq mm1, [rsi+16]
39 movq [rdi+16], mm1
41 movq mm1, [rsi+24]
43 movq [rdi+24], mm1
71 movq mm0, [rax ]
74 movq mm1, [rax +8
    …
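
In this NASM-syntax code, movq with mm registers is the MMX 64-bit move: each load/store pair copies eight bytes (four 16-bit coefficients) between buffers, stepping by 8. The elided line between each load and its store is presumably the multiply; a sketch of one dequantize step under that assumption:

    ; one MMX dequantize step (NASM syntax; the pmullw step is assumed)
    movq   mm1, [rsi]    ; load four 16-bit quantized coefficients
    pmullw mm1, [rax]    ; multiply by the dequantization factors
    movq   [rdi], mm1    ; store four dequantized coefficients
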
  /external/libvpx/libvpx/vp8/encoder/x86/
quantize_mmx.asm 29 movq mm0, [rsi]
32 movq mm1, [rax]
34 movq mm3, mm0
40 movq mm2, mm3
44 movq mm3, mm1
47 movq mm1, [rdx]
50 movq mm2, [rcx]
59 movq mm0, mm3
61 movq [rdi], mm3
64 movq mm2, [rax
    …
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-linux.S 114 movq $__NR_rt_sigprocmask, %rax // syscall #
115 movq $VKI_SIG_SETMASK, %rdi // how
116 movq %rdx, %rsi // sysmask
117 movq %rcx, %rdx // postmask
118 movq %r8, %r10 // sigsetSzB
130 movq %rsi, %rax /* rax --> VexGuestAMD64State * */
132 movq OFFSET_amd64_RDI(%rax), %rdi
133 movq OFFSET_amd64_RSI(%rax), %rsi
134 movq OFFSET_amd64_RDX(%rax), %rdx
135 movq OFFSET_amd64_R10(%rax), %r1
    …
syscall-amd64-darwin.S 87 movq %rsp, %rbp
103 movq $__NR_rt_sigprocmask, %rax // syscall #
104 movq $VKI_SIG_SETMASK, %rdi // how
105 movq -24(%rbp), %rsi // sysmask
106 movq -32(%rbp), %rdx // postmask
107 movq -40(%rbp), %r10 // sigsetSzB in r10 not rcx
117 movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
118 movq OFFSET_amd64_RDI(%r11), %rdi
119 movq OFFSET_amd64_RSI(%r11), %rsi
120 movq OFFSET_amd64_RDX(%r11), %rd
    …
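
Both wrappers marshal guest registers into the kernel's syscall convention: number in %rax, arguments in %rdi, %rsi, %rdx, %r10, %r8, %r9, with %r10 standing in for the C ABI's %rcx for the clobbering reason noted above. A minimal sketch of a four-argument Linux syscall in that convention (the mask source registers are illustrative):

    # rt_sigprocmask(SIG_SETMASK, &new, &old, 8) on amd64 Linux
    movq $14, %rax       # __NR_rt_sigprocmask
    movq $2, %rdi        # how = SIG_SETMASK
    movq %r12, %rsi      # new mask (illustrative source register)
    movq %r13, %rdx      # old mask out (illustrative)
    movq $8, %r10        # sigsetsize; arg 4 goes in %r10, not %rcx
    syscall
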
  /dalvik/vm/mterp/x86/
OP_ADD_DOUBLE.S 7 movq (rFP, %eax, 4), %xmm0 # %xmm0<- vBB
8 movq (rFP, %ecx, 4), %xmm1 # %xmm1<- vCC
12 movq %xmm0, (rFP, rINST, 4) # vAA<- vBB + vCC
OP_MUL_DOUBLE.S 8 movq (rFP, %eax, 4), %xmm0 # %xmm0<- vBB
9 movq (rFP, %ecx, 4), %xmm1 # %xmm1<- vCC
13 movq %xmm0, (rFP, rINST, 4) # vAA<- vBB * vCC
OP_SUB_DOUBLE.S 8 movq (rFP, %eax, 4), %xmm0 # %xmm0<- vBB
9 movq (rFP, %ecx, 4), %xmm1 # %xmm1<- vCC
13 movq %xmm0, (rFP, rINST, 4) # vAA<- vBB - vCC
OP_ADD_DOUBLE_2ADDR.S 8 movq (rFP, rINST, 4), %xmm1 # %xmm1<- vB
9 movq (rFP, %ecx, 4), %xmm0 # %xmm0<- vA
13 movq %xmm0, (rFP, %ecx, 4) # vA<- %xmm0; result
OP_MUL_DOUBLE_2ADDR.S 9 movq (rFP, rINST, 4), %xmm1 # %xmm1<- vB
10 movq (rFP, %ecx, 4), %xmm0 # %xmm0<- vA
14 movq %xmm0, (rFP, %ecx, 4) # vA<- %xmm0; result
  /external/openssl/crypto/sha/asm/
sha512-x86_64.S 13 movq %rsp,%r11
18 movq %rdi,128+0(%rsp)
19 movq %rsi,128+8(%rsp)
20 movq %rdx,128+16(%rsp)
21 movq %r11,128+24(%rsp)
26 movq 0(%rdi),%rax
27 movq 8(%rdi),%rbx
28 movq 16(%rdi),%rcx
29 movq 24(%rdi),%rdx
30 movq 32(%rdi),%r
    …
  /external/libffi/src/x86/
unix64.S 48 movq (%rsp), %r10 /* Load return address. */
50 movq %rdx, (%rax) /* Save flags. */
51 movq %rcx, 8(%rax) /* Save raddr. */
52 movq %rbp, 16(%rax) /* Save old frame pointer. */
53 movq %r10, 24(%rax) /* Relocate return address. */
54 movq %rax, %rbp /* Finalize local stack frame. */
56 movq %rdi, %r10 /* Save a copy of the register area. */
57 movq %r8, %r11 /* Save a copy of the target fn. */
61 movq (%r10), %rdi
62 movq 8(%r10), %rs
    …
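
Here movq lays out the call trampoline's frame by hand: flags, the return address, and the old frame pointer are spilled into a scratch block, then the integer argument registers are reloaded from the prepared register area held in %r10. A minimal sketch of that reload, assuming the 8-byte slot layout the first two loads establish:

    # reload the six integer argument registers from the register area
    movq (%r10), %rdi
    movq 8(%r10), %rsi
    movq 16(%r10), %rdx
    movq 24(%r10), %rcx
    movq 32(%r10), %r8
    movq 40(%r10), %r9
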
  /external/libvpx/libvpx/vpx_scale/win32/
scaleopt.c 81 movq mm5, const35_1 // mm5 = 66 xx cd xx 33 xx 9a xx
82 movq mm6, const35_2 // mm6 = 9a xx 33 xx cd xx 66 xx
84 movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
210 movq mm5, const45_1 // mm5 = 33 xx 66 xx 9a xx cd xx
211 movq mm6, const45_2 // mm6 = cd xx 9a xx 66 xx 33 xx
213 movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
218 movq mm0, QWORD PTR [esi] // mm0 = 00 01 02 03 04 05 06 07
219 movq mm1, QWORD PTR [esi+1]; // mm1 = 01 02 03 04 05 06 07 08
221 movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
222 movq mm3, mm1 // mm3 = 01 02 03 04 05 06 07 0
1021 movq [esi+ecx], mm0 // write out eight bytes
1065 movq [esi+ecx], mm0 // write out eight bytes
1140 movq [edi], mm2
1143 movq [edi+8], mm4
1188 movq [edi], mm2
1191 movq [edi+8], mm4
    …
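
The paired loads at [esi] and [esi+1] fetch the same eight pixels shifted by one byte, the usual MMX idiom for filtering each pixel against its right-hand neighbour before the weighted-average scaling math. A sketch of just that neighbour-load step (MSVC inline assembly, Intel syntax, as in this file):

    movq mm0, QWORD PTR [esi]     ; mm0 = p0 p1 p2 p3 p4 p5 p6 p7
    movq mm1, QWORD PTR [esi+1]   ; mm1 = p1 p2 p3 p4 p5 p6 p7 p8
    movq mm2, mm0                 ; working copies for the unpack
    movq mm3, mm1
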
  /external/valgrind/main/coregrind/m_dispatch/
dispatch-amd64-darwin.S 104 movq %rsi, %rbp
145 movq $VG_TRC_INVARIANT_FAILED, %rax
146 movq $0, %rdx
151 movq %rax, 0(%rdi)
152 movq %rdx, 8(%rdi)
181 movq $VG_TRC_CHAIN_ME_TO_SLOW_EP, %rax
195 movq $VG_TRC_CHAIN_ME_TO_FAST_EP, %rax
206 movq OFFSET_amd64_RIP(%rbp), %rax
214 movq %rax, %rbx /* next guest addr */
217 movq 0(%rcx,%rbx,1), %r10 /* .guest *
    …
dispatch-amd64-linux.S 105 movq %rsi, %rbp
146 movq $VG_TRC_INVARIANT_FAILED, %rax
147 movq $0, %rdx
152 movq %rax, 0(%rdi)
153 movq %rdx, 8(%rdi)
182 movq $VG_TRC_CHAIN_ME_TO_SLOW_EP, %rax
196 movq $VG_TRC_CHAIN_ME_TO_FAST_EP, %rax
207 movq OFFSET_amd64_RIP(%rbp), %rax
214 movq %rax, %rbx /* next guest addr */
217 movq 0(%rcx,%rbx,1), %r10 /* .guest *
    …
  /external/llvm/test/MC/X86/
x86_errors.s 30 movq (%rsi,%ecx),%xmm0
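
This test line is intentionally malformed: in 64-bit mode the base and index registers of a memory operand must be the same width, so the 64-bit base %rsi cannot be paired with the 32-bit index %ecx. The fix is simply a 64-bit index:

    movq (%rsi,%ecx), %xmm0   # rejected: 64-bit base with 32-bit index
    movq (%rsi,%rcx), %xmm0   # accepted: base and index are both 64-bit
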
  /external/qemu/distrib/sdl-1.2.15/src/audio/
SDL_mixer_MMX_VC.c 59 movq mm1, mm0 //%%mm0,%%mm1
71 movq mm1, [esi] //(%%esi),%%mm1 // mm1 = a|b|c|d
72 movq mm2, mm1 //%%mm1,%%mm2 // mm2 = a|b|c|d
73 movq mm4, [esi + 8] //8(%%esi),%%mm4 // mm4 = e|f|g|h
75 movq mm7, [edi] //(%%edi),%%mm7 // mm7 = dst[0]
79 movq mm5, mm4 //%%mm4,%%mm5 // mm5 = e|f|g|h
82 movq mm3, mm1 //%%mm1,%%mm3 // mm3 = l(a*v)|l(b*v)|l(c*v)|l(d*v)
84 movq mm6, mm4 //%%mm4,%%mm6 // mm6 = l(e*v)|l(f*v)|l(g*v)|l(h*v)
89 movq mm5, [edi + 8] //8(%%edi),%%mm5 // mm5 = dst[1]
101 movq [edi], mm3 //%%mm3,(%%edi)
103 movq [edi + 8], mm6 //%%mm6,8(%%edi)
168 movq [edi], mm3 //%%mm3,(%%edi) // store back to ram
    …
