Home | Sort by relevance | Sort by last modified time
    Searched full:movd (Results 76 - 100 of 178) sorted by null

1 2 3 4 5 6 7 8

  /external/libyuv/files/source/
scale_argb.cc 120 movd xmm0, [eax]
121 movd xmm1, [eax + ebx]
123 movd xmm2, [eax + ebx * 2]
124 movd xmm3, [eax + edi]
210 movd xmm5, eax // xmm5 = y fraction
298 movd xmm0, eax // high fraction 0..127
301 movd xmm5, eax // low fraction 128..1
436 "movd (%0),%%xmm0 \n"
437 "movd (%0,%1,1),%%xmm1 \n"
439 "movd (%0,%1,2),%%xmm2 \n"
    [all...]
row_win.cc 328 movd xmm5, eax
331 movd xmm6, eax
379 movd xmm5, eax
382 movd xmm6, eax
433 movd xmm4, eax
3048 movd [edx], xmm0 local
3110 movd [edx], xmm0 local
3180 movd [edx], xmm0 local
3269 movd [edx], xmm0 local
4006 movd [edx], xmm0 local
    [all...]
compare.cc 80 movd xmm0, [esp + 12] // seed
120 movd eax, xmm0 // return hash
163 "movd %2,%%xmm0 \n"
199 "movd %%xmm0,%3 \n"
288 movd eax, xmm0
325 "movd %%xmm0,%3 \n"
row_posix.cc 331 "movd %%eax,%%xmm5 \n"
334 "movd %%eax,%%xmm6 \n"
381 "movd %%eax,%%xmm5 \n"
384 "movd %%eax,%%xmm6 \n"
434 "movd %%eax,%%xmm4 \n"
    [all...]
format_conversion.cc 35 movd xmm5, [esp + 12] // selector
45 movd [edx], xmm0 local
58 "movd %3,%%xmm5 \n"
66 "movd %%xmm0,(%1) \n"
scale.cc 465 movd dword ptr [edx], xmm0
535 movd dword ptr [edx], xmm0
748 movd [edx + 8], xmm1 local
811 movd [edx], xmm6 // write 6 pixels local
813 movd [edx + 2], xmm6 local
856 movd [edx], xmm1 // write 6 pixels local
858 movd [edx + 2], xmm1 local
953 movd xmm5, eax // xmm5 = y fraction
1044 movd xmm0, eax // high fraction 0..127
1047 movd xmm5, eax // low fraction 128..1
    [all...]
  /external/zlib/src/contrib/inflate86/
inffast.S 884 movd %ebp, hold_mm
894 movd lmask(%esp), lmask_mm
896 movd dmask(%esp), dmask_mm
918 movd bitslong_r, tmp_mm
919 movd (in_r), %mm7
927 movd lmask_mm, %eax
933 movd %ecx, used_mm
954 movd %eax, used_mm
955 movd hold_mm, %ecx
966 movd bitslong_r, tmp_mm
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
loopfilter_mmx.asm 562 movd [rsi+rax*4+2], mm2
565 movd [rdi+rax*4+2], mm2
566 movd [rsi+rax*2+2], mm6
569 movd [rsi+rax+2],mm6
571 movd [rsi+2], mm1
574 movd [rdi+2], mm1
577 movd [rdi+rax+2],mm5
580 movd [rdi+rax*2+2], mm5
    [all...]
sad_sse4.asm 128 movd xmm0, [rsi]
135 movd xmm0, [rsi]
144 movd xmm0, [rsi + rax]
variance_impl_ssse3.asm 338 movd [rsi], xmm0
339 movd [rdi], xmm6
idctllm_sse2.asm 33 movd xmm4, [rax]
34 movd xmm5, [rdx]
45 movd [rax], xmm5
46 movd [rax+32], xmm5
381 movd xmm4, [rdx]
  /external/openssl/crypto/bn/asm/
x86-gf2m.pl 64 &movd ($A,$a);
65 &movd ($B,$b);
94 &movd ($R,&DWP(0,"esp",@i[0],4));
99 &movd (@T[1],&DWP(0,"esp",@i[1],4));
108 &movd (@T[1],&DWP(0,"esp",@i[1],4));
113 &movd (@T[0],&DWP(0,"esp",@i[0],4));
  /external/libvpx/libvpx/vp8/encoder/x86/
quantize_sse2.asm 60 movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value
217 movd eax, xmm2
360 movd eax, xmm1
  /external/llvm/test/CodeGen/X86/
trunc-ext-ld-st.ll 24 ;CHECK: movd
widen_cast-2.ll 4 ; CHECK: movd
vec_shuffle-26.ll 50 ; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
51 ; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
  /external/openssl/crypto/modes/asm/
ghash-x86.pl 370 &movd ($rem[0],$Zlo);
388 &movd ($rem[1],$Zlo);
397 &movd ($Zhl,$Zhi);
399 &movd ($Zlh,$Zlo);
400 &movd ($Zhh,$Zhi);
518 &movd ($rem,$Zlo);
531 &movd ($rem,$Zlo);
546 &movd ($rem,$Zlo);
561 &movd ($rem,$Zlo);
572 &movd ($rem,$Zlo)
    [all...]
  /external/llvm/test/MC/X86/
x86-64.s 979 movd %rax, %mm5 // CHECK: movd %rax, %mm5 # encoding: [0x48,0x0f,0x6e,0xe8] label
980 movd %mm5, %rbx // CHECK: movd %mm5, %rbx # encoding: [0x48,0x0f,0x7e,0xeb] label
981 movq %rax, %mm5 // CHECK: movd %rax, %mm5 # encoding: [0x48,0x0f,0x6e,0xe8]
982 movq %mm5, %rbx // CHECK: movd %mm5, %rbx # encoding: [0x48,0x0f,0x7e,0xeb]
1159 // CHECK: movd %rdi, %xmm0
1163 // CHECK: movd %rdi, %xmm0
1165 movd %rdi,%xmm0
1167 // CHECK: movd %xmm0, %rax
    [all...]
  /external/qemu/distrib/sdl-1.2.15/src/audio/
SDL_mixer_MMX.c 54 " movd %%eax,%%mm0\n"
148 " movd %%eax,%%mm0\n"
  /external/qemu/distrib/sdl-1.2.15/src/video/
mmx.h 393 #define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
394 #define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
395 #define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)
396 #define movd(vars, vard) \ macro
397 __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
398 "movd %%mm0, %0" \
SDL_yuv_mmx.c 109 "movd (%%ebx),%%mm1\n" // 0 0 0 0 v3 v2 v1 v0
112 "movd (%2), %%mm2\n" // 0 0 0 0 l3 l2 l1 l0
144 "movd (%1), %%mm1\n" // 0 0 0 0 u3 u2 u1 u0
266 "movd (%1), %%mm0\n" // 4 Cb 0 0 0 0 u3 u2 u1 u0
270 "movd (%%ebx), %%mm1\n" // 4 Cr 0 0 0 0 v3 v2 v1 v0
  /external/valgrind/main/VEX/test/
mmxtest.c 200 #define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
201 #define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
202 #define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)
203 #define movd(vars, vard) \ macro
204 __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
205 "movd %%mm0, %0" \
  /external/valgrind/main/none/tests/amd64/
insn_mmx.def 4 movd mm.sd[1234,5678] r64.sd[1111,2222] => 1.sd[1234,5678]
5 movd r64.sd[1234,5678] mm.sd[1111,2222] => 1.sd[1234,5678]
9 movd r32.sd[1234] mm.sd[1111,2222] => 1.sd[1234,0]
10 movd mm.sd[1234,2222] r32.sd[1111] => 1.sd[1234]
14 movd m32.sd[1234] mm.sd[1111,2222] => 1.sd[1234,0]
15 movd mm.sd[1234,2222] m32.sd[1111] => 1.sd[1234]
  /external/libffi/src/x86/
darwin64.S 165 movd %xmm0, %r10
166 movd %xmm1, %r11
297 movd %rcx, %xmm0
unix64.S 169 movd %xmm0, %r10
170 movd %xmm1, %r11
308 movd %rcx, %xmm0

Completed in 931 milliseconds

1 2 3 4 5 6 7 8