/external/llvm/test/CodeGen/X86/ |
2008-02-26-AsmDirectMemOp.ll | 15 tail call void asm sideeffect "movd $1, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09movd $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0A\09packssdw %mm5, %mm5 \0A\09psubw %mm5, %mm7 \0A\09pxor %mm4, %mm4 \0A\09.align 1<<4\0A\091: \0A\09movq ($0, $3), %mm0 \0A\09movq 8($0, $3), %mm1 \0A\09pmullw %mm6, %mm0 \0A\09pmullw %mm6, %mm1 \0A\09movq ($0, $3), %mm2 \0A\09movq 8($0, $3), %mm3 \0A\09pcmpgtw %mm4, %mm2 \0A\09pcmpgtw %mm4, %mm3 \0A\09pxor %mm2, %mm0 \0A\09pxor %mm3, %mm1 \0A\09paddw %mm7, %mm0 \0A\09paddw %mm7, %mm1 \0A\09pxor %mm0, %mm2 \0A\09pxor %mm1, %mm3 \0A\09pcmpeqw %mm7, %mm0 \0A\09pcmpeqw %mm7, %mm1 \0A\09pandn %mm2, %mm0 \0A\09pandn %mm3, %mm1 \0A\09movq %mm0, ($0, $3) \0A\09movq %mm1, 8($0, $3) \0A\09add $$16, $3 \0A\09jng 1b \0A\09", "r,imr,imr,r,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* null, i32 %tmp1, i32 0, i32 0 ) nounwind
|
vec_shuffle-14.ll | 2 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 1
3 ; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movd | count 2
|
2009-06-05-VZextByteShort.ll | 4 ; RUN: grep movd %t1 | count 4
|
extractelement-load.ll | 6 ; CHECK-NOT: movd
|
extractps.ll | 2 ; RUN: not grep movd %t
|
isint.ll | 11 ; CHECK-NEXT: movd
|
opt-shuff-tstore.ll | 5 ; CHECK: movd
|
vec_set-2.ll | 2 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 1
|
vshift-3.ll | 30 ; CHECK: movd
54 ; CHECK: movd
|
asm-reg-type-mismatch.ll | 30 ; CHECK: movd %xmm7, %rax
|
mmx-bitcast-to-i64.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movd | count 4
|
vec_shift3.ll | 3 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 2
|
/external/zlib/src/contrib/masmx86/ |
inffas32.asm | 640 movd mm0,ebp
643 movd mm4,dword ptr [esp+0]
645 movd mm5,dword ptr [esp+4]
658 movd mm6,ebp
659 movd mm7,dword ptr [esi]
667 movd eax,mm4
673 movd mm1,ecx
703 movd mm1,eax
704 movd ecx,mm0
715 movd mm6,ebp [all...] |
/external/flac/libFLAC/ia32/ |
fixed_asm.nasm | 108 movd mm3, [ebx - 4] ; mm3 = 0:last_error_0
109 movd mm2, [ebx - 8] ; mm2 = 0:data[-2]
110 movd mm1, [ebx - 12] ; mm1 = 0:data[-3]
111 movd mm0, [ebx - 16] ; mm0 = 0:data[-4]
128 movd mm7, [ebx] ; mm7 = 0:error_0
172 movd edi, mm2 ; edi = total_error_4
173 movd esi, mm1 ; esi = total_error_3
174 movd eax, mm0 ; eax = total_error_0
177 movd edx, mm1 ; edx = total_error_2
178 movd ecx, mm3 ; ecx = total_error_ [all...] |
lpc_asm.nasm | 660 movd mm0, [eax]
661 movd mm2, [eax + 4]
662 movd mm4, [eax + 8]
663 movd mm6, [eax + 12]
672 movd mm1, [eax + 4 * ebx]
673 movd mm3, [eax + 4 * ebx + 4]
674 movd mm5, [eax + 4 * ebx + 8]
675 movd mm7, [eax + 4 * ebx + 12]
703 movd mm0, [esi]
711 movd mm1, [esi + 4 * ebx [all...] |
/external/libvpx/libvpx/vp8/common/x86/ |
variance_impl_mmx.asm | 455 movd mm0, [rax] ; Copy eight bytes to mm0
456 movd mm1, [rbx] ; Copy eight bytes to mm1
463 movd mm1, [rbx] ; Copy eight bytes to mm1
467 movd mm0, [rax] ; Copy eight bytes to mm0
474 movd mm1, [rbx] ; Copy eight bytes to mm1
478 movd mm0, [rax] ; Copy eight bytes to mm0
486 movd mm1, [rbx] ; Copy eight bytes to mm1
490 movd mm0, [rax] ; Copy eight bytes to mm0
549 movd mm1, [rsi] ;
550 movd mm3, [rsi+1] [all...] |
variance_impl_sse2.asm | 202 movd DWORD PTR [rax], xmm7
203 movd DWORD PTR [rdi], xmm1
391 movd DWORD PTR [rdi], xmm1
669 movd [rsi], mm2 ; xsum
670 movd [rdi], mm4 ; xxsum
792 movd [rsi], mm2 ;
793 movd [rdi], mm4 ;
908 movd [rsi], xmm0
909 movd [rdi], xmm6 [all...] |
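The xsum/xxsum pair stored at lines 669/670 is the usual sum and sum-of-squares accumulation behind a variance kernel. A minimal C sketch of the final step those two totals feed, assuming the standard var = E[x^2] - E[x]^2 identity (the function name and types are illustrative, not taken from libvpx):

    #include <stdint.h>

    /* Combine a sum (xsum) and sum of squares (xxsum) over n samples
     * into the integer variance: n*var = xxsum - xsum*xsum / n.
     * The asm above only stores the two accumulators; this reduction
     * is a sketch, not libvpx's own code. */
    static uint32_t variance_from_sums(int32_t xsum, uint32_t xxsum, uint32_t n)
    {
        int64_t sum = xsum;
        return (uint32_t)((xxsum - (uint64_t)(sum * sum) / n) / n);
    }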
mfqe_sse2.asm | 33 movd xmm0, arg(4) ; src_weight
110 movd xmm0, arg(4) ; src_weight
231 movd [rax], xmm0
260 movd [rax], xmm1
|
loopfilter_sse2.asm | [all...] |
postproc_sse2.asm | 528 ;movd xmm7, rax
529 movd xmm7, edx
532 ;movd xmm6, rax
533 movd xmm6, ecx
546 movd xmm1, DWORD PTR [rsi+rcx-8] ; -8 -7 -6 -5
547 movd xmm2, DWORD PTR [rsi+rcx+7] ; +7 +8 +9 +10
603 movd xmm1, DWORD PTR [rsi+rcx]
621 movd [rsi+rcx-8], mm0
|
subpixel_mmx.asm | 79 movd mm4, [rsi+3]
188 movd [rdi],mm3 ; store the results in the destination
565 movd mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
569 movd mm5, [rsi+1] ;
584 movd mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
588 movd mm5, [rsi+1] ;
614 movd [rdi], mm3 ; store the results in the destination
|
subpixel_ssse3.asm | 345 movd DWORD PTR [rdi], xmm0
390 movd DWORD PTR [rdi], xmm1
781 movd mm1, DWORD PTR [rsi] ;A
782 movd mm2, DWORD PTR [rsi + rdx] ;B
783 movd mm3, DWORD PTR [rsi + rdx * 2] ;C
784 movd mm4, DWORD PTR [rax + rdx * 2] ;D
785 movd mm0, DWORD PTR [rsi + rdx * 4] ;E
790 movd mm0, DWORD PTR [rax + rdx * 4] ;F
808 movd DWORD PTR [rdi], mm2
837 movd mm2, DWORD PTR [rsi + rdx] ; [all...] |
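The loads tagged ;A through ;F at lines 781-790 gather the six input rows of a vertical 6-tap subpixel filter. A scalar C model of what those rows feed, assuming VP8's usual +64 then >> 7 rounding convention (tap values and names here are illustrative, not read out of this file):

    #include <stdint.h>

    static uint8_t clamp8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* rows[0..5] are the pixels loaded as A..F above; taps[] holds the
     * six filter coefficients. The +64 bias and >> 7 correspond to a
     * filter shift of 7, which is assumed here. */
    static uint8_t filter6(const uint8_t rows[6], const int taps[6])
    {
        int acc = 64;
        for (int i = 0; i < 6; i++)
            acc += rows[i] * taps[i];
        return clamp8(acc >> 7);
    }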
/external/libvpx/libvpx/vp8/encoder/x86/ |
encodeopt.asm | 83 pxor mm1, mm1 ; from movd mm1, dc ; dc =0
145 movd mm1, dword ptr arg(2) ;dc
223 movd xmm5, dword ptr arg(2) ;dc
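The comment on line 83 records a small strength reduction: when the dc operand is known to be zero, the movd load is replaced by pxor, which zeroes the register without touching memory. The same idea in SSE2 intrinsics (a sketch; the function name is made up for illustration):

    #include <emmintrin.h>

    /* General path loads dc with a movd; the known-zero path zeroes
     * the register directly with pxor, as the asm comment describes. */
    static __m128i load_dc(int dc)
    {
        if (dc == 0)
            return _mm_setzero_si128();   /* pxor xmm, xmm */
        return _mm_cvtsi32_si128(dc);     /* movd xmm, r32 */
    }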
|
temporal_filter_apply_sse2.asm | 46 movd xmm6, arg(4)
53 movd xmm4, rdx ; can't use rdx w/ shift
64 movd xmm0, arg(5) ; filter_weight
|
/external/llvm/lib/Target/X86/ |
X86InstrMMX.td | 197 "movd\t{$src, $dst|$dst, $src}",
203 "movd\t{$src, $dst|$dst, $src}",
209 "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>;
215 "movd\t{$src, $dst|$dst, $src}",
221 "movd\t{$src, $dst|$dst, $src}",
226 // movd.
229 "movd\t{$src, $dst|$dst, $src}",
233 "movd\t{$src, $dst|$dst, $src}",
281 // movd to MMX register zero-extends
283 "movd\t{$src, $dst|$dst, $src}" [all...] |
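The comment at line 281 states that movd into an MMX register zero-extends the 32-bit source to 64 bits; the XMM form behaves the same way, clearing the upper 96 bits. A small runnable check using the SSE2 intrinsic that typically lowers to a single movd (shown instead of the deprecated MMX intrinsics):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        /* movd r32 -> xmm: the value lands in lane 0, upper lanes are zeroed */
        __m128i v = _mm_cvtsi32_si128(0x12345678);
        unsigned int lanes[4];
        _mm_storeu_si128((__m128i *)lanes, v);
        printf("%08x %08x %08x %08x\n", lanes[0], lanes[1], lanes[2], lanes[3]);
        /* expected: 12345678 00000000 00000000 00000000 */
        return 0;
    }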