    Searched refs: movq (Results 1 - 25 of 135)


  /external/llvm/test/MC/ELF/
x86_64-reloc-sizetest.s 6 L: movq $(L + 2147483648),%rax
merge.s 14 movq foo@GOTPCREL, %rax
15 movq zed, %rax
relocation.s 7 movq $bar, %rdx # R_X86_64_32S
8 movq $bar, bar(%rip) # R_X86_64_32S
10 movq bar, %rdx # R_X86_64_32S
18 movq foo(%rip), %rdx
  /external/valgrind/main/coregrind/m_mach/
mach_traps-amd64-darwin.S 39 movq $__NR_task_self_trap, %rax
40 movq %rcx, %r10
50 // movq $__NR_host_self_trap, %rax
51 // movq %rcx, %r10
60 movq $__NR_thread_self_trap, %rax
61 movq %rcx, %r10
70 movq $__NR_mach_msg_trap, %rax
71 movq %rcx, %r10
80 movq $__NR_mach_reply_port, %rax
81 movq %rcx, %r10
    [all...]
  /external/libvpx/vp8/common/x86/
recon_mmx.asm 78 movq mm0, [rsi]
83 movq mm1, [rsi+rax]
84 movq mm2, [rsi+rax*2]
89 movq [rdi], mm0
92 movq [rdi+rcx], mm1
93 movq [rdi+rcx*2], mm2
97 movq mm3, [rsi]
100 movq mm4, [rsi+rax]
102 movq mm5, [rsi+rax*2]
103 movq [rdi], mm3
    [all...]
  /external/libvpx/vp8/encoder/x86/
quantize_mmx.asm 29 movq mm0, [rsi]
32 movq mm1, [rax]
34 movq mm3, mm0
40 movq mm2, mm3
44 movq mm3, mm1
47 movq mm1, [rdx]
50 movq mm2, [rcx]
59 movq mm0, mm3
61 movq [rdi], mm3
64 movq mm2, [rax
    [all...]
subtract_mmx.asm 39 movq [rdi], mm0
47 movq [rdi+rcx*2],mm0
55 movq [rdi+rcx*4], mm0
67 movq [rdi+rcx*2], mm0
98 movq mm1, [rsi]
99 movq mm3, [rax]
101 movq mm2, mm1
102 movq mm4, mm3
113 movq [rdi], mm1
114 movq [rdi+8], mm2
    [all...]
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-linux.S 114 movq $__NR_rt_sigprocmask, %rax // syscall #
115 movq $VKI_SIG_SETMASK, %rdi // how
116 movq %rdx, %rsi // sysmask
117 movq %rcx, %rdx // postmask
118 movq %r8, %r10 // sigsetSzB
130 movq %rsi, %rax /* rax --> VexGuestAMD64State * */
132 movq OFFSET_amd64_RDI(%rax), %rdi
133 movq OFFSET_amd64_RSI(%rax), %rsi
134 movq OFFSET_amd64_RDX(%rax), %rdx
135 movq OFFSET_amd64_R10(%rax), %r10
    [all...]
syscall-amd64-darwin.S 87 movq %rsp, %rbp
103 movq $__NR_rt_sigprocmask, %rax // syscall #
104 movq $VKI_SIG_SETMASK, %rdi // how
105 movq -24(%rbp), %rsi // sysmask
106 movq -32(%rbp), %rdx // postmask
107 movq -40(%rbp), %r10 // sigsetSzB in r10 not rcx
117 movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
118 movq OFFSET_amd64_RDI(%r11), %rdi
119 movq OFFSET_amd64_RSI(%r11), %rsi
120 movq OFFSET_amd64_RDX(%r11), %rdx
    [all...]
  /dalvik/vm/mterp/x86-atom/
OP_RETURN_WIDE.S 32 movq (rFP, rINST, 4), %xmm0 # %xmm0<- vAA
33 movq %xmm0, offGlue_retval(%edx)# glue->retval<- vAA
OP_MOVE_RESULT_WIDE.S 35 movq offGlue_retval(%eax), %xmm0 # %xmm0<- glue->retval
36 movq %xmm0, (rFP, rINST, 4) # vA<- glue->retval
OP_MOVE_WIDE_16.S 34 movq (rFP, %edx, 4), %xmm0 # %xmm0<- vB
35 movq %xmm0, (rFP, %ecx, 4) # vA<- vB; %xmm0
OP_MOVE_WIDE_FROM16.S 32 movq (rFP, %edx, 4), %xmm0 # %xmm0<- vB
33 movq %xmm0, (rFP, rINST, 4) # vA<- vB
OP_SHL_LONG.S 34 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
37 movq (rFP, %edx, 4), %xmm1 # %xmm1<- vBB
39 movq %xmm1, (rFP, rINST, 4) # vAA<- shifted vBB
OP_SHL_LONG_2ADDR.S 36 movq (rFP, rINST, 4), %xmm1 # %xmm1<- vA
37 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
40 movq %xmm1, (rFP, rINST, 4) # vA<- shifted vA
OP_USHR_LONG_2ADDR.S 35 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
37 movq (rFP, rINST, 4), %xmm1 # %xmm1<- vA
40 movq %xmm1, (rFP, rINST, 4) # vA<- shifted vA
binopWide.S 36 movq (rFP, %ecx, 4), %xmm0 # %xmm0<- vBB
37 movq (rFP, %edx, 4), %xmm1 # %xmm1<- vCC
39 movq %xmm0, (rFP, rINST, 4) # vAA<- %ecx
binopWide2addr.S 37 movq (rFP, rINST, 4), %xmm1 # %xmm1<- vB
38 movq (rFP, %edx, 4), %xmm0 # %xmm0<- vA
40 movq %xmm0, (rFP, %edx, 4) # vA<- %xmm0; result
  /external/libffi/src/x86/
unix64.S 48 movq (%rsp), %r10 /* Load return address. */
50 movq %rdx, (%rax) /* Save flags. */
51 movq %rcx, 8(%rax) /* Save raddr. */
52 movq %rbp, 16(%rax) /* Save old frame pointer. */
53 movq %r10, 24(%rax) /* Relocate return address. */
54 movq %rax, %rbp /* Finalize local stack frame. */
56 movq %rdi, %r10 /* Save a copy of the register area. */
57 movq %r8, %r11 /* Save a copy of the target fn. */
61 movq (%r10), %rdi
62 movq 8(%r10), %rsi
    [all...]
  /external/libvpx/vpx_scale/win32/
scaleopt.c 83 movq mm5, const35_1 // mm5 = 66 xx cd xx 33 xx 9a xx
84 movq mm6, const35_2 // mm6 = 9a xx 33 xx cd xx 66 xx
86 movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
214 movq mm5, const45_1 // mm5 = 33 xx 66 xx 9a xx cd xx
215 movq mm6, const45_2 // mm6 = cd xx 9a xx 66 xx 33 xx
217 movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
222 movq mm0, QWORD PTR [esi] // mm0 = 00 01 02 03 04 05 06 07
223 movq mm1, QWORD PTR [esi+1]; // mm1 = 01 02 03 04 05 06 07 08
225 movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
226 movq mm3, mm1 // mm3 = 01 02 03 04 05 06 07 08
1035 movq [esi+ecx], mm0 // write out eight bytes
1081 movq [esi+ecx], mm0 // write out eight bytes
1158 movq [edi], mm2
1161 movq [edi+8], mm4
1206 movq [edi], mm2
1209 movq [edi+8], mm4
    [all...]
  /external/libvpx/vp8/decoder/x86/
dequantize_mmx.asm 29 movq mm1, [rsi]
31 movq [rdi], mm1
33 movq mm1, [rsi+8]
35 movq [rdi+8], mm1
37 movq mm1, [rsi+16]
39 movq [rdi+16], mm1
41 movq mm1, [rsi+24]
43 movq [rdi+24], mm1
68 movq mm0, [rax ]
71 movq mm1, [rax +8]
    [all...]
  /external/qemu/distrib/sdl-1.2.12/src/audio/
SDL_mixer_MMX_VC.c 58 movq mm1, mm0 //%%mm0,%%mm1
70 movq mm1, [esi] //(%%esi),%%mm1\n" // mm1 = a|b|c|d
71 movq mm2, mm1 //%%mm1,%%mm2\n" // mm2 = a|b|c|d
72 movq mm4, [esi + 8] //8(%%esi),%%mm4\n" // mm4 = e|f|g|h
74 movq mm7, [edi] //(%%edi),%%mm7\n" // mm7 = dst[0]"
78 movq mm5, mm4 //%%mm4,%%mm5\n" // mm5 = e|f|g|h
81 movq mm3, mm1 //%%mm1,%%mm3\n" // mm3 = l(a*v)|l(b*v)|l(c*v)|l(d*v)
83 movq mm6, mm4 //%%mm4,%%mm6\n" // mm6 = l(e*v)|l(f*v)|l(g*v)|l(h*v)
88 movq mm5, [edi + 8] //8(%%edi),%%mm5\n" // mm5 = dst[1]
100 movq [edi], mm3 //%%mm3,(%%edi)\n
102 movq [edi + 8], mm6 //%%mm6,8(%%edi)\n"
167 movq [edi], mm3 //%%mm3,(%%edi) // store back to ram
    [all...]
  /external/valgrind/main/coregrind/m_dispatch/
dispatch-amd64-darwin.S 77 movq VG_(dispatch_ctr)@GOTPCREL(%rip), %r15
85 movq %rdi, %rbp
88 movq OFFSET_amd64_RIP(%rbp), %rax
125 movq VG_(tt_fast)@GOTPCREL(%rip), %rcx
129 movq %rax, OFFSET_amd64_RIP(%rbp)
136 movq %rax, %rbx
139 movq 0(%rcx,%rbx,1), %r10 /* .guest */
140 movq 8(%rcx,%rbx,1), %r11 /* .host */
163 movq VG_(tt_fast)@GOTPCREL(%rip), %rcx
167 movq %rax, OFFSET_amd64_RIP(%rbp)
    [all...]
dispatch-amd64-linux.S 78 movq VG_(dispatch_ctr)@GOTPCREL(%rip), %r15
86 movq %rdi, %rbp
89 movq OFFSET_amd64_RIP(%rbp), %rax
129 movq VG_(tt_fast)@GOTPCREL(%rip), %rcx
133 movq %rax, OFFSET_amd64_RIP(%rbp)
140 movq %rax, %rbx /* next guest addr */
143 movq 0(%rcx,%rbx,1), %r10 /* .guest */
144 movq 8(%rcx,%rbx,1), %r11 /* .host */
170 movq VG_(tt_fast)@GOTPCREL(%rip), %rcx
174 movq %rax, OFFSET_amd64_RIP(%rbp)
    [all...]
  /external/llvm/test/MC/X86/
x86_64-encoding.s 9 // CHECK: movq %gs:(%rdi), %rax
11 movq %gs:(%rdi), %rax
123 // CHECK: movq 57005(,%riz), %rbx
125 movq 57005(,%riz), %rbx
127 // CHECK: movq 48879(,%riz), %rax
129 movq 48879(,%riz), %rax
131 // CHECK: movq -4(,%riz,8), %rax
133 movq -4(,%riz,8), %rax
135 // CHECK: movq (%rcx,%riz), %rax
137 movq (%rcx,%riz), %rax
    [all...]

