HomeSort by relevance Sort by last modified time
    Searched refs:rsi (Results 76 - 100 of 428) sorted by relevance

1 2 3 4 5 6 7 8 9 10 11 >>

  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_subpixel_8t_ssse3.asm 16 mov rsi, arg(0) ;src_ptr
45 mov rax, rsi
53 movd xmm0, [rsi] ;A
54 movd xmm1, [rsi + rdx] ;B
55 movd xmm2, [rsi + rdx * 2] ;C
57 movd xmm4, [rsi + rdx * 4] ;E
64 movd xmm6, [rsi + rbx] ;G
84 add rsi, rdx
103 mov rsi, arg(0) ;src_ptr
132 mov rax, rsi
    [all...]
vp9_subpixel_8t_sse2.asm 90 mov rsi, arg(0) ;src_ptr
131 movq xmm0, [rsi + %1] ;0
132 movq xmm1, [rsi + rax + %1] ;1
133 movq xmm6, [rsi + rdx * 2 + %1] ;6
134 lea rsi, [rsi + rax]
135 movq xmm7, [rsi + rdx * 2 + %1] ;7
136 movq xmm2, [rsi + rax + %1] ;2
137 movq xmm3, [rsi + rax * 2 + %1] ;3
138 movq xmm4, [rsi + rdx + %1] ;
    [all...]
  /external/openssl/crypto/aes/asm/
aesni-x86_64.S 19 movups %xmm2,(%rsi)
40 movups %xmm2,(%rsi)
469 movups %xmm2,(%rsi)
473 movups %xmm3,16(%rsi)
475 movups %xmm4,32(%rsi)
477 movups %xmm5,48(%rsi)
479 movups %xmm6,64(%rsi)
481 movups %xmm7,80(%rsi)
483 movups %xmm8,96(%rsi)
485 movups %xmm9,112(%rsi)
    [all...]
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_subpel_variance_impl_sse2.asm 30 push rsi
36 mov rsi, arg(0) ;ref_ptr ;
45 movdqu xmm5, XMMWORD PTR [rsi]
46 movdqu xmm3, XMMWORD PTR [rsi+1]
49 lea rsi, [rsi + rax]
52 movdqu xmm1, XMMWORD PTR [rsi] ;
53 movdqu xmm2, XMMWORD PTR [rsi+1] ;
79 lea rsi, [rsi + rax
    [all...]
vp9_variance_impl_sse2.asm 24 push rsi
65 pop rsi
88 push rsi
92 mov rsi, arg(0) ;[src_ptr]
100 prefetcht0 [rsi]
101 prefetcht0 [rsi+rax]
102 prefetcht0 [rsi+rax*2]
103 prefetcht0 [rsi+rcx]
104 lea rbx, [rsi+rax*4]
128 movdqu xmm1, XMMWORD PTR [rsi]
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_subpel_variance_impl_sse2.asm 30 push rsi
36 mov rsi, arg(0) ;ref_ptr ;
45 movdqu xmm5, XMMWORD PTR [rsi]
46 movdqu xmm3, XMMWORD PTR [rsi+1]
49 lea rsi, [rsi + rax]
52 movdqu xmm1, XMMWORD PTR [rsi] ;
53 movdqu xmm2, XMMWORD PTR [rsi+1] ;
79 lea rsi, [rsi + rax
    [all...]
vp9_variance_impl_sse2.asm 24 push rsi
65 pop rsi
88 push rsi
92 mov rsi, arg(0) ;[src_ptr]
100 prefetcht0 [rsi]
101 prefetcht0 [rsi+rax]
102 prefetcht0 [rsi+rax*2]
103 prefetcht0 [rsi+rcx]
104 lea rbx, [rsi+rax*4]
128 movdqu xmm1, XMMWORD PTR [rsi]
    [all...]
  /external/chromium_org/third_party/boringssl/linux-x86_64/crypto/bn/
x86_64-mont.S 15 cmpq %rsi,%rdx
42 movq (%rsi),%rax
57 movq 8(%rsi),%rax
67 movq (%rsi,%r15,8),%rax
88 movq (%rsi),%rax
120 movq 8(%rsi),%rax
131 movq (%rsi,%r15,8),%rax
154 movq (%rsi),%rax
176 leaq (%rsp),%rsi
182 movq 8(%rsi,%r14,8),%ra
    [all...]
  /external/chromium_org/third_party/boringssl/mac-x86_64/crypto/bn/
x86_64-mont.S 15 cmpq %rsi,%rdx
42 movq (%rsi),%rax
57 movq 8(%rsi),%rax
67 movq (%rsi,%r15,8),%rax
88 movq (%rsi),%rax
120 movq 8(%rsi),%rax
131 movq (%rsi,%r15,8),%rax
154 movq (%rsi),%rax
176 leaq (%rsp),%rsi
182 movq 8(%rsi,%r14,8),%ra
    [all...]
  /external/chromium_org/third_party/boringssl/win-x86_64/crypto/aes/
aesni-x86_64.asm 508 mov QWORD PTR[16+rsp],rsi
512 mov rsi,rdx
544 movups XMMWORD PTR[rsi],xmm2
548 movups XMMWORD PTR[16+rsi],xmm3
550 movups XMMWORD PTR[32+rsi],xmm4
552 movups XMMWORD PTR[48+rsi],xmm5
554 movups XMMWORD PTR[64+rsi],xmm6
556 movups XMMWORD PTR[80+rsi],xmm7
558 movups XMMWORD PTR[96+rsi],xmm8
560 movups XMMWORD PTR[112+rsi],xmm
    [all...]
  /external/chromium_org/third_party/boringssl/win-x86_64/crypto/bn/
rsaz-x86_64.asm 11 mov QWORD PTR[16+rsp],rsi
15 mov rsi,rdx
31 mov rdx,QWORD PTR[rsi]
32 mov rax,QWORD PTR[8+rsi]
43 mov rax,QWORD PTR[16+rsi]
48 mov rax,QWORD PTR[24+rsi]
54 mov rax,QWORD PTR[32+rsi]
60 mov rax,QWORD PTR[40+rsi]
66 mov rax,QWORD PTR[48+rsi]
72 mov rax,QWORD PTR[56+rsi]
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/x86/
variance_impl_ssse3.asm 39 push rsi
63 mov rsi, arg(0) ;ref_ptr
67 movdqu xmm0, XMMWORD PTR [rsi]
68 movdqu xmm1, XMMWORD PTR [rsi+1]
84 add rsi, dword ptr arg(1) ;ref_pixels_per_line
88 lea rsi, [rsi + r8]
92 movdqu xmm1, XMMWORD PTR [rsi]
93 movdqu xmm2, XMMWORD PTR [rsi+1]
137 add rsi, dword ptr arg(1) ;ref_pixels_per_lin
    [all...]
variance_impl_sse2.asm 26 push rsi
67 pop rsi
90 push rsi
94 mov rsi, arg(0) ;[src_ptr]
102 prefetcht0 [rsi]
103 prefetcht0 [rsi+rax]
104 prefetcht0 [rsi+rax*2]
105 prefetcht0 [rsi+rcx]
106 lea rbx, [rsi+rax*4]
130 movdqu xmm1, XMMWORD PTR [rsi]
    [all...]
sad_sse4.asm 16 movdqa xmm0, XMMWORD PTR [rsi]
37 movdqa xmm0, XMMWORD PTR [rsi]
60 movdqa xmm0, XMMWORD PTR [rsi + rax]
67 lea rsi, [rsi+rax*2]
88 movq xmm0, MMWORD PTR [rsi]
98 movq xmm0, MMWORD PTR [rsi]
110 movq xmm0, MMWORD PTR [rsi + rax]
115 lea rsi, [rsi+rax*2
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
variance_impl_ssse3.asm 39 push rsi
63 mov rsi, arg(0) ;ref_ptr
67 movdqu xmm0, XMMWORD PTR [rsi]
68 movdqu xmm1, XMMWORD PTR [rsi+1]
84 add rsi, dword ptr arg(1) ;ref_pixels_per_line
88 lea rsi, [rsi + r8]
92 movdqu xmm1, XMMWORD PTR [rsi]
93 movdqu xmm2, XMMWORD PTR [rsi+1]
137 add rsi, dword ptr arg(1) ;ref_pixels_per_lin
    [all...]
variance_impl_sse2.asm 26 push rsi
67 pop rsi
90 push rsi
94 mov rsi, arg(0) ;[src_ptr]
102 prefetcht0 [rsi]
103 prefetcht0 [rsi+rax]
104 prefetcht0 [rsi+rax*2]
105 prefetcht0 [rsi+rcx]
106 lea rbx, [rsi+rax*4]
130 movdqu xmm1, XMMWORD PTR [rsi]
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
variance_impl_ssse3.asm 39 push rsi
63 mov rsi, arg(0) ;ref_ptr
67 movdqu xmm0, XMMWORD PTR [rsi]
68 movdqu xmm1, XMMWORD PTR [rsi+1]
84 add rsi, dword ptr arg(1) ;ref_pixels_per_line
88 lea rsi, [rsi + r8]
92 movdqu xmm1, XMMWORD PTR [rsi]
93 movdqu xmm2, XMMWORD PTR [rsi+1]
137 add rsi, dword ptr arg(1) ;ref_pixels_per_lin
    [all...]
variance_impl_sse2.asm 26 push rsi
67 pop rsi
90 push rsi
94 mov rsi, arg(0) ;[src_ptr]
102 prefetcht0 [rsi]
103 prefetcht0 [rsi+rax]
104 prefetcht0 [rsi+rax*2]
105 prefetcht0 [rsi+rcx]
106 lea rbx, [rsi+rax*4]
130 movdqu xmm1, XMMWORD PTR [rsi]
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/
vp9_subpixel_8t_sse2.asm 90 mov rsi, arg(0) ;src_ptr
131 movq xmm0, [rsi + %1] ;0
132 movq xmm1, [rsi + rax + %1] ;1
133 movq xmm6, [rsi + rdx * 2 + %1] ;6
134 lea rsi, [rsi + rax]
135 movq xmm7, [rsi + rdx * 2 + %1] ;7
136 movq xmm2, [rsi + rax + %1] ;2
137 movq xmm3, [rsi + rax * 2 + %1] ;3
138 movq xmm4, [rsi + rdx + %1] ;
    [all...]
  /external/libvpx/libvpx/vp9/common/x86/
vp9_subpixel_8t_sse2.asm 90 mov rsi, arg(0) ;src_ptr
131 movq xmm0, [rsi + %1] ;0
132 movq xmm1, [rsi + rax + %1] ;1
133 movq xmm6, [rsi + rdx * 2 + %1] ;6
134 lea rsi, [rsi + rax]
135 movq xmm7, [rsi + rdx * 2 + %1] ;7
136 movq xmm2, [rsi + rax + %1] ;2
137 movq xmm3, [rsi + rax * 2 + %1] ;3
138 movq xmm4, [rsi + rdx + %1] ;
    [all...]
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-linux.S 68 void* guest_state, // rsi
92 pushq %rsi ; \
101 popq %rsi ; \
116 movq %rdx, %rsi // sysmask
130 movq %rsi, %rax /* rax --> VexGuestAMD64State * */
133 movq OFFSET_amd64_RSI(%rax), %rsi
149 movq %rax, OFFSET_amd64_RAX(%rsi) /* save back to RAX */
158 movq %rcx, %rsi // postmask
  /external/openssl/crypto/bn/asm/
x86_64-mont5.S 60 movq (%rsi),%rax
90 movq 8(%rsi),%rax
100 movq (%rsi,%r15,8),%rax
123 movq (%rsi),%rax
170 movq 8(%rsi),%rax
181 movq (%rsi,%r15,8),%rax
206 movq (%rsi),%rax
228 leaq (%rsp),%rsi
234 movq 8(%rsi,%r14,8),%rax
241 andq %rax,%rsi
    [all...]
  /external/chromium_org/third_party/boringssl/src/crypto/md5/asm/
md5-x86_64.pl 22 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
29 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
46 $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
54 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
72 $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
76 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
94 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
103 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
141 # rsi = arg #2 (ptr, data pointer)
145 lea (%rsi,%rdx), %rdi # rdi = en
    [all...]
  /external/openssl/crypto/md5/asm/
md5-x86_64.pl 22 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
29 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
46 $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
54 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
72 $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
76 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
94 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
103 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
141 # rsi = arg #2 (ptr, data pointer)
145 lea (%rsi,%rdx), %rdi # rdi = en
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 16 movdqa xmm0, XMMWORD PTR [rsi]
37 movdqa xmm0, XMMWORD PTR [rsi]
60 movdqa xmm0, XMMWORD PTR [rsi + rax]
67 lea rsi, [rsi+rax*2]
88 movq xmm0, MMWORD PTR [rsi]
98 movq xmm0, MMWORD PTR [rsi]
110 movq xmm0, MMWORD PTR [rsi + rax]
115 lea rsi, [rsi+rax*2
    [all...]

Completed in 2921 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>