/external/libvpx/vp8/common/x86/ |
recon_mmx.asm | 19 push rsi
23 mov rsi, arg(0) ;s
29 movd mm1, [rsi]
35 movd mm2, [rsi+16]
41 movd mm3, [rsi+32]
48 movd mm4, [rsi+48]
56 pop rsi
73 push rsi
77 mov rsi, arg(0) ;src;
78 movq mm0, [rsi] [all...] |
recon_sse2.asm | 19 push rsi
23 mov rsi, arg(0) ;s
29 movq xmm1, MMWORD PTR [rsi]
36 movq xmm2, MMWORD PTR [rsi+8]
43 movq xmm3, MMWORD PTR [rsi+16]
50 movq xmm4, MMWORD PTR [rsi+24]
58 pop rsi
71 push rsi
75 mov rsi, arg(0) ;s
81 movdqa xmm1, XMMWORD PTR [rsi] [all...] |
subpixel_ssse3.asm | 43 push rsi
48 xor rsi, rsi
64 mov rsi, arg(0) ;src_ptr
73 movq xmm0, MMWORD PTR [rsi - 2] ; -2 -1 0 1 2 3 4 5
75 movq xmm2, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
91 lea rsi, [rsi + rax]
108 pop rsi
121 mov rsi, arg(0) ;src_pt [all...] |
subpixel_sse2.asm | 42 push rsi
47 mov rsi, arg(0) ;src_ptr
59 movq xmm3, MMWORD PTR [rsi - 2]
60 movq xmm1, MMWORD PTR [rsi + 6]
62 prefetcht2 [rsi+rax-2]
118 lea rsi, [rsi + rax]
131 pop rsi
162 push rsi
167 mov rsi, arg(0) ;src_pt [all...] |
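The subpixel files above are VP8's 6-tap sub-pixel interpolation filters; the paired loads at [rsi - 2] and [rsi + 3] (annotated "-2 -1 0 1 2 3 4 5" and "3 4 5 6 7 8 9 10") fetch the 6-pixel window around each output pixel. A minimal scalar sketch of the horizontal pass, assuming the half-pel kernel {3, -16, 77, 77, -16, 3}; the real code selects the kernel from the sub-pixel offset:

    #include <stdint.h>

    static uint8_t clamp255(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    void sixtap_horizontal(const uint8_t *src, uint8_t *dst, int width)
    {
        static const int taps[6] = { 3, -16, 77, 77, -16, 3 };   /* sums to 128 */
        for (int x = 0; x < width; x++) {
            int acc = 0;
            for (int k = 0; k < 6; k++)
                acc += taps[k] * src[x + k - 2];   /* window src[x-2..x+3] */
            dst[x] = clamp255((acc + 64) >> 7);    /* round, divide by 128 */
        }
    }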
/external/compiler-rt/lib/x86_64/ |
floatundisf.S | 20 movq $1, %rsi 26 1: andq %rdi, %rsi 28 orq %rsi, %rdi
|
/frameworks/compile/libbcc/runtime/lib/x86_64/ |
floatundisf.S | 20 movq $1, %rsi 26 1: andq %rdi, %rsi 28 orq %rsi, %rdi
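Both copies of floatundisf.S (compiler-rt and its libbcc import) convert an unsigned 64-bit integer to float. The movq $1 / andq / orq sequence matched above keeps the bit shifted out when the value is halved as a "sticky" bit, so the signed conversion still rounds correctly. A C sketch of the same trick (function name is illustrative):

    #include <stdint.h>

    float u64_to_f32(uint64_t a)
    {
        if ((int64_t)a >= 0)
            return (float)(int64_t)a;        /* fits the signed range */
        /* halve, but fold the shifted-out bit back in as a sticky bit so
           the rounding at the conversion sees it, then double the result
           (exact, since 2.0f is a power of two) */
        uint64_t halved = (a >> 1) | (a & 1);
        return (float)(int64_t)halved * 2.0f;
    }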
|
/external/llvm/test/MC/ELF/ |
cfi-reg.s | 9 .cfi_offset %rsi, -16 17 // CHECK: .cfi_offset %rsi, -16
|
/external/llvm/test/MC/X86/ |
address-size.s | 6 movb $0x0, (%rsi)
|
x86_errors.s | 30 movq (%rsi,%ecx),%xmm0
|
x86-64.s | 461 // CHECK: xorq %rsi, %rsi
466 clr %rsi
538 leaq 8(%eax), %rsi
539 // CHECK: leaq 8(%eax), %rsi
541 leaq 8(%rax), %rsi
542 // CHECK: leaq 8(%rax), %rsi
711 // CHECK: movzbq %al, %rsi
713 movzx %al, %rsi
956 mov (%rsi), %gs // CHECK: movl (%rsi), %gs # encoding: [0x8e,0x2e [all...] |
/external/libvpx/vp8/encoder/x86/ |
sad_sse2.asm | 24 push rsi
28 mov rsi, arg(0) ;src_ptr
34 lea rcx, [rsi+rax*8]
41 movq xmm0, QWORD PTR [rsi]
42 movq xmm2, QWORD PTR [rsi+8]
47 movq xmm4, QWORD PTR [rsi+rax]
55 movq xmm6, QWORD PTR [rsi+rax+8]
58 lea rsi, [rsi+rax*2]
69 cmp rsi, rc [all...] |
sad_mmx.asm | 29 push rsi
33 mov rsi, arg(0) ;src_ptr
39 lea rcx, [rsi+rax*8]
48 movq mm0, QWORD PTR [rsi]
49 movq mm2, QWORD PTR [rsi+8]
79 lea rsi, [rsi+rax]
85 cmp rsi, rcx
104 pop rsi
121 push rsi [all...] |
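The SAD kernels walk src_ptr (rsi) and the reference pointer in lockstep: lea rsi, [rsi+rax*2] advances two rows per iteration and cmp rsi, rcx tests against the end row precomputed by lea rcx, [rsi+rax*8]. The absolute-difference sums themselves come from psadbw; a sketch with intrinsics (the function name and the 16x16 shape are assumptions):

    #include <emmintrin.h>
    #include <stdint.h>

    unsigned sad16x16(const uint8_t *src, int src_stride,
                      const uint8_t *ref, int ref_stride)
    {
        __m128i acc = _mm_setzero_si128();
        for (int row = 0; row < 16; row++) {
            __m128i s = _mm_loadu_si128((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_loadu_si128((const __m128i *)(ref + row * ref_stride));
            /* psadbw: |s - r| per byte, summed into two 16-bit partials */
            acc = _mm_add_epi64(acc, _mm_sad_epu8(s, r));
        }
        acc = _mm_add_epi64(acc, _mm_srli_si128(acc, 8));   /* fold high half */
        return (unsigned)_mm_cvtsi128_si32(acc);
    }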
encodeopt.asm | 20 push rsi
24 mov rsi, arg(0) ;coeff_ptr
27 movdqa xmm3, [rsi]
30 movdqa xmm5, [rsi+16]
56 pop rsi
68 push rsi
73 mov rsi, arg(0) ;coeff_ptr
77 movq mm3, [rsi]
80 movq mm5, [rsi+8]
98 movq mm3, [rsi+16 [all...] |
subtract_mmx.asm | 22 push rsi
29 mov rsi, arg(0) ;z
34 movd mm0, [rsi]
42 movd mm0, [rsi+rdx]
50 movd mm0, [rsi+rdx*2]
57 lea rsi, [rsi+rdx*2]
62 movd mm0, [rsi+rdx]
71 pop rsi
82 push rsi [all...] |
variance_impl_ssse3.asm | 39 push rsi
63 mov rsi, arg(0) ;ref_ptr
67 movdqu xmm0, XMMWORD PTR [rsi]
68 movdqu xmm1, XMMWORD PTR [rsi+1]
84 add rsi, dword ptr arg(1) ;ref_pixels_per_line
88 lea rsi, [rsi + r8]
92 movdqu xmm1, XMMWORD PTR [rsi]
93 movdqu xmm2, XMMWORD PTR [rsi+1]
137 add rsi, dword ptr arg(1) ;ref_pixels_per_lin [all...] |
variance_impl_sse2.asm | 26 push rsi
67 pop rsi
89 push rsi
93 mov rsi, arg(0) ;[src_ptr]
101 prefetcht0 [rsi]
102 prefetcht0 [rsi+rax]
103 prefetcht0 [rsi+rax*2]
104 prefetcht0 [rsi+rcx]
105 lea rbx, [rsi+rax*4]
129 movdqu xmm1, XMMWORD PTR [rsi] [all...] |
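The variance kernels make one pass accumulating both the sum and the sum of squared differences (the adjacent [rsi] / [rsi+1] loads in the ssse3 version feed bilinear half-pel averaging first), then subtract the squared-mean term. A scalar sketch, with illustrative names:

    #include <stdint.h>

    unsigned variance(const uint8_t *src, int src_stride,
                      const uint8_t *ref, int ref_stride,
                      int w, int h, unsigned *sse_out)
    {
        int64_t sum = 0;
        uint64_t sse = 0;                       /* sum of squared diffs */
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++) {
                int d = src[y * src_stride + x] - ref[y * ref_stride + x];
                sum += d;
                sse += (uint64_t)(d * d);
            }
        *sse_out = (unsigned)sse;
        /* variance = SSE - sum^2 / N */
        return (unsigned)(sse - (uint64_t)(sum * sum) / (uint64_t)(w * h));
    }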
subtract_sse2.asm | 23 push rsi
29 mov rsi, arg(0) ;z
34 movd mm0, [rsi]
41 movd mm0, [rsi+rdx]
48 movd mm0, [rsi+rdx*2]
55 lea rsi, [rsi+rdx*2]
58 movd mm0, [rsi+rdx]
67 pop rsi
82 push rsi [all...] |
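subtract_mmx.asm and subtract_sse2.asm compute the residual the transform consumes: source (arg(0), the "z" pointer above) minus prediction, widened to 16 bits because the difference can be negative. A scalar equivalent (parameter names are illustrative):

    #include <stdint.h>

    void subtract_block(int16_t *diff, const uint8_t *src, int src_stride,
                        const uint8_t *pred, int pred_stride, int w, int h)
    {
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                diff[y * w + x] = (int16_t)(src[y * src_stride + x]
                                            - pred[y * pred_stride + x]);
    }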
sad_sse4.asm | 16 movdqa xmm0, XMMWORD PTR [rsi]
37 movdqa xmm0, XMMWORD PTR [rsi]
60 movdqa xmm0, XMMWORD PTR [rsi + rax]
67 lea rsi, [rsi+rax*2]
88 movq xmm0, MMWORD PTR [rsi]
98 movq xmm0, MMWORD PTR [rsi]
110 movq xmm0, MMWORD PTR [rsi + rax]
115 lea rsi, [rsi+rax*2 [all...] |
quantize_ssse3.asm | 25 push rsi
31 mov rsi, arg(4) ;quant_ptr
51 pmulhw xmm1, [rsi]
52 pmulhw xmm5, [rsi + 16]
56 mov rsi, arg(5) ;dqcoeff_ptr
95 movdqa [rsi], xmm2 ;store dqcoeff
96 movdqa [rsi + 16], xmm3 ;store dqcoeff
105 pop rsi
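quantize_ssse3.asm quantizes with pmulhw, a signed multiply that keeps only the high 16 bits of each lane, so the quant table acts as a fixed-point reciprocal of the step size; dequantized coefficients are then rebuilt and stored through dqcoeff_ptr. A hedged scalar sketch (the exact rounding and the pabsw/psignw handling in the real kernel are approximated):

    #include <stdint.h>
    #include <stdlib.h>

    void quantize_block(const int16_t *coeff, const int16_t *quant,
                        const int16_t *dequant, int16_t *qcoeff,
                        int16_t *dqcoeff, int n)
    {
        for (int i = 0; i < n; i++) {
            int sign = coeff[i] < 0 ? -1 : 1;
            int q = (abs(coeff[i]) * quant[i]) >> 16;   /* pmulhw on |coeff| */
            qcoeff[i]  = (int16_t)(sign * q);
            dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[i]);
        }
    }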
|
/external/valgrind/main/none/tests/amd64/ |
asorep.c | 10 unsigned long rdi, rsi, rcx, rax;
20 : "=D" (rdi), "=S" (rsi), "=c" (rcx)
25 || rsi != (uintptr_t) buf1 + 4
41 : "=S" (rsi), "=a" (rax)
43 if (rsi != (uintptr_t) buf2 + 4
50 : "=D" (rdi), "=S" (rsi), "=c" (rcx)
53 || rsi != (uintptr_t) buf1 + 15
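asorep.c drives x86 string instructions from inline asm and reads the registers' final values back through output constraints, then checks post-conditions such as rsi landing exactly past the bytes processed. A minimal sketch of the same constraint pattern (buffer names are illustrative):

    #include <stdint.h>

    int main(void)
    {
        char src[8] = "abcdefg", dst[8];
        unsigned long rdi, rsi, rcx;
        /* rep movsb: D = destination, S = source, c = count; the "0"/"1"/"2"
           inputs seed the same registers the outputs are read from */
        __asm__ volatile("rep movsb"
                         : "=D"(rdi), "=S"(rsi), "=c"(rcx)
                         : "0"((uintptr_t)dst), "1"((uintptr_t)src), "2"(sizeof src)
                         : "memory");
        (void)rdi; (void)rcx;
        /* after the copy, rsi points one past the last source byte */
        return rsi == (uintptr_t)src + sizeof src ? 0 : 1;
    }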
|
/external/valgrind/main/coregrind/m_syswrap/ |
syscall-amd64-linux.S | 68 void* guest_state, // rsi
92 pushq %rsi ; \
101 popq %rsi ; \
116 movq %rdx, %rsi // sysmask
130 movq %rsi, %rax /* rax --> VexGuestAMD64State * */
133 movq OFFSET_amd64_RSI(%rax), %rsi
149 movq %rax, OFFSET_amd64_RAX(%rsi) /* save back to RAX */
158 movq %rcx, %rsi // postmask
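The wrapper loads the guest's registers out of the VexGuestAMD64State block (movq OFFSET_amd64_RSI(%rax), %rsi and friends), executes the real syscall, and stores rax back. The convention it reproduces is the standard Linux x86-64 one: number in rax, arguments in rdi, rsi, rdx, r10, r8, r9, with rcx and r11 clobbered by the syscall instruction. A minimal raw wrapper for the first three arguments, for illustration:

    /* illustrative; a real wrapper would also decode the -errno return */
    static long raw_syscall3(long nr, long a1, long a2, long a3)
    {
        long ret;
        __asm__ volatile("syscall"
                         : "=a"(ret)
                         : "a"(nr), "D"(a1), "S"(a2), "d"(a3)
                         : "rcx", "r11", "memory");
        return ret;
    }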
|
/external/openssl/crypto/bn/asm/ |
modexp512-x86_64.pl | 343 # 512-bit mul source: [rsi+8*n]
346 # Clobbers all regs except: rcx, rsi, rdi
352 &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
356 &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
425 mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rsi # pM1 (Bsrc) 512 bits, 8 qwords
426 add \$$M1, %rsi
431 &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times
460 # rsi -> M1
467 add \$`$M2-$M1`, %rsi # rsi -> M [all...] |
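MULSTEP_512 and MULADD_256x512 emit unrolled limb-by-limb multiply-accumulate steps: rsi walks the eight 64-bit qwords of one 512-bit operand while a rotating register file holds the partial sums. A scalar reference for what the unrolling computes, using unsigned __int128 (a GCC/Clang extension) to model the rdx:rax product of the mul instruction:

    #include <stdint.h>

    void mul_512x512(uint64_t r[16], const uint64_t a[8], const uint64_t b[8])
    {
        for (int i = 0; i < 16; i++) r[i] = 0;
        for (int i = 0; i < 8; i++) {               /* schoolbook multiply */
            uint64_t carry = 0;
            for (int j = 0; j < 8; j++) {
                unsigned __int128 t = (unsigned __int128)a[i] * b[j]
                                      + r[i + j] + carry;
                r[i + j] = (uint64_t)t;
                carry = (uint64_t)(t >> 64);
            }
            r[i + 8] = carry;
        }
    }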
/external/zlib/src/contrib/masmx64/ |
inffasx64.asm | 32 mov [rsp-8],rsi
47 mov rsi, [rsp+16] ; /* rsi = in */
64 cmp r9, rsi
85 cmp r9, rsi
190 mov r8, rsi ; /* save in so from can use it's reg */
198 mov rsi, rdi
199 sub rsi, r15 ; /* from = out - dist */
205 mov al, [rsi]
209 mov rsi, r8 ; /* move in back to %rsi, toss from */ [all...] |
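Lines 198-199 above are the LZ77 match copy: "from" is computed as out - dist, and the copy proceeds a byte at a time through rsi because the regions overlap whenever the distance is smaller than the length (a copied byte can itself become a source byte). The equivalent C:

    /* byte-serial on purpose: memcpy/memmove would not replicate the
       overlapping-window semantics when dist < len */
    static void lz77_match_copy(unsigned char *out, unsigned dist, unsigned len)
    {
        unsigned char *from = out - dist;
        while (len--)
            *out++ = *from++;
    }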
/external/compiler-rt/lib/tsan/rtl/ |
tsan_rtl_amd64.S | 16 push %rsi
18 .cfi_rel_offset %rsi, 0
61 pop %rsi
73 .cfi_restore %rsi
95 push %rsi
97 .cfi_rel_offset %rsi, 0
140 pop %rsi
152 .cfi_restore %rsi
|
/external/llvm/test/MC/COFF/ |
seh.s | 31 movq %rsi, 16(%rsp) 32 .seh_savereg %rsi, 16
|