    Searched full:movd (Results 51 - 75 of 277)

  /external/llvm/test/CodeGen/X86/
ret-mmx.ll 30 ; CHECK: movd {{.*}}, %xmm0
37 ; CHECK: movd {{.*}}, %xmm0
uint_to_fp-2.ll 6 ; CHECK: movd
vec_set-B.ll 10 ; movd %eax, %xmm0
vec_shuffle-17.ll 4 ; CHECK: movd {{%rdi|%rcx}}, %xmm0
blend-msb.ll 9 ;CHECK-NEXT: movd
19 ;CHECK-NEXT: movd
vec_shuffle-14.ll 11 ; X86-32: movd 4(%esp), %xmm0
14 ; X86-64: movd %e{{..}}, %xmm0
27 ; X86-64: movd %r{{..}}, %xmm0
vshift-1.ll 18 ; CHECK: movd
40 ; CHECK: movd
65 ; CHECK: movd
vshift-2.ll 18 ; CHECK: movd
39 ; CHECK: movd
65 ; CHECK: movd
  /external/chromium_org/third_party/openssl/openssl/crypto/bn/asm/
x86-gf2m.S 13 movd %eax,%mm2
14 movd %ebx,%mm3
43 movd (%esp,%esi,4),%mm0
47 movd (%esp,%edi,4),%mm2
53 movd (%esp,%esi,4),%mm1
59 movd (%esp,%edi,4),%mm2
65 movd (%esp,%esi,4),%mm1
71 movd (%esp,%edi,4),%mm2
77 movd (%esp,%esi,4),%mm1
83 movd (%esp,%edi,4),%mm
    [all...]
bn-586.pl 42 &movd("mm0",&wparam(3)); # mm0 = w
47 &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
49 &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
51 &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
53 &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
55 &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
58 &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
60 &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
62 &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
64 &movd(&DWP(0,$r,"",0),"mm1")
    [all...]
  /external/openssl/crypto/bn/asm/
x86-gf2m.S 13 movd %eax,%mm2
14 movd %ebx,%mm3
43 movd (%esp,%esi,4),%mm0
47 movd (%esp,%edi,4),%mm2
53 movd (%esp,%esi,4),%mm1
59 movd (%esp,%edi,4),%mm2
65 movd (%esp,%esi,4),%mm1
71 movd (%esp,%edi,4),%mm2
77 movd (%esp,%esi,4),%mm1
83 movd (%esp,%edi,4),%mm
    [all...]
bn-586.pl 42 &movd("mm0",&wparam(3)); # mm0 = w
47 &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
49 &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
51 &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
53 &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
55 &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
58 &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
60 &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
62 &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
64 &movd(&DWP(0,$r,"",0),"mm1")
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
postproc_mmx.asm 96 movd mm1, DWORD PTR [rdi];
116 movd mm1, DWORD PTR [rsi] ; [s-pitch*8]
117 movd mm2, DWORD PTR [rdi] ; [s+pitch*7]
176 movd mm1, DWORD PTR [rsi+rax*8]
205 movd DWORD PTR [rsp+rcx*4], mm1 ;d[rcx*4]
211 movd mm1, DWORD PTR [rsp+rcx*4] ;d[rcx*4]
213 movd [rsi], mm1
sad_mmx.asm 273 movd mm0, DWORD PTR [rsi]
274 movd mm1, DWORD PTR [rdi]
276 movd mm2, DWORD PTR [rsi+rax]
277 movd mm3, DWORD PTR [rdi+rdx]
299 movd mm4, DWORD PTR [rsi]
300 movd mm5, DWORD PTR [rdi]
302 movd mm6, DWORD PTR [rsi+rax]
303 movd mm7, DWORD PTR [rdi+rdx]
sad_sse2.asm 224 movd mm0, DWORD PTR [rsi]
225 movd mm1, DWORD PTR [rdi]
227 movd mm2, DWORD PTR [rsi+rax]
228 movd mm3, DWORD PTR [rdi+rdx]
237 movd mm4, DWORD PTR [rsi]
239 movd mm5, DWORD PTR [rdi]
240 movd mm6, DWORD PTR [rsi+rax]
242 movd mm7, DWORD PTR [rdi+rdx]
recon_sse2.asm 166 movd mm1, edx
287 movd mm1, edx
382 movd xmm3, [rsi-1]
398 movd xmm3, [rsi]
399 movd xmm5, [rsi+rax]
515 movd mm0, [rsi]
516 movd mm1, [rsi+rax]
528 movd xmm0, [rsi]
529 movd xmm3, [rsi+rax]
530 movd xmm1, [rsi+rax*2]
    [all...]
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_sad4d_sse2.asm 17 movd m0, [srcq +%2]
19 movd m6, [ref1q+%3]
20 movd m4, [ref2q+%3]
21 movd m7, [ref3q+%3]
22 movd m5, [ref4q+%3]
35 movd m1, [ref1q+%3]
36 movd m2, [ref2q+%3]
37 movd m3, [ref3q+%3]
38 movd m4, [ref4q+%3]
vp9_variance_impl_mmx.asm 345 movd mm0, [rax] ; Copy 4 bytes to mm0
346 movd mm1, [rbx] ; Copy 4 bytes to mm1
354 movd mm1, [rbx] ; Copy 4 bytes to mm1
359 movd mm0, [rax] ; Copy 4 bytes to mm0
368 movd mm1, [rbx] ; Copy 4 bytes to mm1
372 movd mm0, [rax] ; Copy 4 bytes to mm0
381 movd mm1, [rbx] ; Copy 4 bytes to mm1
385 movd mm0, [rax] ; Copy 4 bytes to mm0
455 movd mm0, [rax] ; Copy eight bytes to mm0
456 movd mm1, [rbx] ; Copy eight bytes to mm
    [all...]
vp9_error_sse2.asm 71 movd eax, m4
72 movd edx, m5
vp9_sad_mmx.asm 273 movd mm0, DWORD PTR [rsi]
274 movd mm1, DWORD PTR [rdi]
276 movd mm2, DWORD PTR [rsi+rax]
277 movd mm3, DWORD PTR [rdi+rdx]
299 movd mm4, DWORD PTR [rsi]
300 movd mm5, DWORD PTR [rdi]
302 movd mm6, DWORD PTR [rsi+rax]
303 movd mm7, DWORD PTR [rdi+rdx]
  /external/chromium_org/third_party/openssl/openssl/crypto/modes/asm/
ghash-x86.S 216 movd %mm0,%ebp
226 movd %mm0,%ebx
238 movd %mm0,%ebp
250 movd %mm0,%ebx
262 movd %mm0,%ebp
274 movd %mm0,%ebx
286 movd %mm0,%ebp
298 movd %mm0,%ebx
310 movd %mm0,%ebp
322 movd %mm0,%eb
    [all...]
  /external/openssl/crypto/modes/asm/
ghash-x86.S 216 movd %mm0,%ebp
226 movd %mm0,%ebx
238 movd %mm0,%ebp
250 movd %mm0,%ebx
262 movd %mm0,%ebp
274 movd %mm0,%ebx
286 movd %mm0,%ebp
298 movd %mm0,%ebx
310 movd %mm0,%ebp
322 movd %mm0,%eb
    [all...]
  /external/llvm/lib/Target/X86/
README-MMX.txt 20 movd %eax, %mm0
55 movd %eax, %mm0
66 movd %eax, %mm0
67 movd %mm0, %rax
  /external/libvpx/libvpx/vp9/common/x86/
vp9_intrapred_ssse3.asm 43 movd m1, [leftq+lineq*2 ]
44 movd m2, [leftq+lineq*2+1]
47 movd [dstq ], m1
48 movd [dstq+strideq], m2
61 movd m1, [leftq+lineq*2 ]
62 movd m2, [leftq+lineq*2+1]
79 movd m1, [leftq+lineq*2 ]
80 movd m2, [leftq+lineq*2+1]
97 movd m1, [leftq+lineq*2 ]
98 movd m2, [leftq+lineq*2+1]
    [all...]
  /external/chromium_org/third_party/mesa/src/src/mesa/x86/
3dnow_xform4.S 158 MOVD ( REGIND(ECX), MM0 ) /* | m00 */
161 MOVD ( REGOFF(40, ECX), MM1 ) /* | m22 */
174 MOVD ( REGOFF(8, EAX), MM3 ) /* | x2 */
235 MOVD ( REGOFF(8, ECX), MM6 ) /* | m2 */
238 MOVD ( REGOFF(40, ECX), MM7 ) /* | m10 */
278 MOVD ( REGOFF(12, EAX), MM0 ) /* | x3 */
324 MOVD ( REGIND(ECX), MM0 ) /* | m00 */
327 MOVD ( REGOFF(40, ECX), MM2 ) /* | m22 */
339 MOVD ( REGOFF(12, EAX), MM7 ) /* | x3 */
398 MOVD ( REGIND(ECX), MM0 ) /* | m00 */
    [all...]
