    Searched refs: rdi (Results 51 - 75 of 484)

  /external/llvm/test/MC/MachO/
tlv-reloc.s 20 movq _a@TLVP(%rip), %rdi
21 call *(%rdi) # returns &a in %rax
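Note: the two matched lines above are the standard Mach-O x86-64 thread-local access sequence: %rdi receives the address of the variable's TLV descriptor, and calling through the descriptor's first field returns the variable's address in %rax. A minimal caller sketch (hypothetical 32-bit thread-local _a, AT&T syntax, not taken from the indexed file):

        .text
        .globl  _read_a
    _read_a:
        movq    _a@TLVP(%rip), %rdi   # %rdi = &TLV descriptor for _a
        call    *(%rdi)               # descriptor's thunk returns &_a in %rax
        movl    (%rax), %eax          # load the thread-local value
        ret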
  /bionic/libc/arch-x86_64/string/
sse2-memmove-slm.S 94 mov %rdi, %rax
97 cmp %rsi, %rdi
112 movdqu %xmm0, (%rdi)
113 movdqu %xmm1, -16(%rdi, %rdx)
125 movdqu %xmm0, (%rdi)
126 movdqu %xmm1, 16(%rdi)
127 movdqu %xmm2, -16(%rdi, %rdx)
128 movdqu %xmm3, -32(%rdi, %rdx)
144 movdqu %xmm0, (%rdi)
145 movdqu %xmm1, 16(%rdi)
    [all...]
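Note: in the bionic memmove above, %rdi is the destination (first SysV argument); line 94 copies it to %rax for the return value, line 97 compares it with the source pointer to pick a copy direction, and the movdqu lines store the head and tail of the destination with unaligned 16-byte writes instead of a byte loop. A sketch of that head/tail idiom for a 16..32-byte move (assumes the SysV ABI with the length in %rdx; not the file's actual control flow):

        movq    %rdi, %rax              # memmove returns dst
        movdqu  (%rsi), %xmm0           # first 16 bytes of src
        movdqu  -16(%rsi,%rdx), %xmm1   # last 16 bytes of src (may overlap)
        movdqu  %xmm0, (%rdi)
        movdqu  %xmm1, -16(%rdi,%rdx)
        ret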
sse4-memcmp-slm.S 94 add %rdx, %rdi
100 movzbl (%rdi), %eax
109 movdqu (%rdi), %xmm2
118 sub %rcx, %rdi
120 test $0xf, %rdi
128 movdqu (%rdi), %xmm2
133 movdqu 16(%rdi), %xmm2
138 movdqu 32(%rdi), %xmm2
143 movdqu 48(%rdi), %xmm2
150 movdqu 64(%rdi), %xmm
    [all...]
  /external/boringssl/linux-x86_64/crypto/sha/
sha512-x86_64.S 33 movq %rdi,128+0(%rsp)
39 movq 0(%rdi),%rax
40 movq 8(%rdi),%rbx
41 movq 16(%rdi),%rcx
42 movq 24(%rdi),%rdx
43 movq 32(%rdi),%r8
44 movq 40(%rdi),%r9
45 movq 48(%rdi),%r10
46 movq 56(%rdi),%r11
51 movq %rbx,%rdi
    [all...]
  /external/boringssl/mac-x86_64/crypto/sha/
sha512-x86_64.S 32 movq %rdi,128+0(%rsp)
38 movq 0(%rdi),%rax
39 movq 8(%rdi),%rbx
40 movq 16(%rdi),%rcx
41 movq 24(%rdi),%rdx
42 movq 32(%rdi),%r8
43 movq 40(%rdi),%r9
44 movq 48(%rdi),%r10
45 movq 56(%rdi),%r11
50 movq %rbx,%rdi
    [all...]
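Note: the linux- and mac-x86_64 copies of sha512-x86_64.S are generated from the same perlasm source, so their matches differ only by one line of offset. The pattern itself: the state pointer arrives in %rdi (first SysV argument), is spilled to 128(%rsp) so it can be found again for the final store-back, the eight 64-bit state words are loaded into registers, and %rdi is then recycled as a round temporary. Condensed sketch (middle loads elided):

        movq    %rdi, 128+0(%rsp)   # keep the state pointer for the store-back
        movq    0(%rdi), %rax       # state word 0
        movq    8(%rdi), %rbx       # state word 1
        movq    56(%rdi), %r11      # ... state word 7
        movq    %rbx, %rdi          # %rdi now holds a working value, not the pointer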
  /external/libvpx/libvpx/vpx_dsp/x86/
ssim_opt_x86_64.asm 71 push rdi
76 mov rdi, arg(2) ;r
91 movdqu xmm6, [rdi]
107 add rdi, rax ; next r row
118 mov rdi,arg(4)
119 movd [rdi], xmm15;
120 mov rdi,arg(5)
121 movd [rdi], xmm14;
122 mov rdi,arg(6)
123 movd [rdi], xmm13
    [all...]
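Note: the libvpx/vp8/vp9 entries here and below share one prologue idiom: rdi is preserved because it is callee-saved under the Win64 ABI these files also target, and the arg(n) macro then loads the n-th C argument into it (under SysV the first six integer arguments arrive in rdi, rsi, rdx, rcx, r8, r9, so arg(2) corresponds to rdx). A rough SysV-only equivalent of the prologue/epilogue above (a sketch, not the actual arg() expansion):

        push    %rdi                # preserved for ABIs where rdi is callee-saved
        movq    %rdx, %rdi          # arg(2): third C argument, the "r" pointer
        # ... body reads and writes through (%rdi) ...
        pop     %rdi
        ret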
sad_sse4.asm 17 movq xmm1, MMWORD PTR [rdi]
18 movq xmm3, MMWORD PTR [rdi+8]
19 movq xmm2, MMWORD PTR [rdi+16]
38 movq xmm5, MMWORD PTR [rdi]
39 movq xmm3, MMWORD PTR [rdi+8]
40 movq xmm2, MMWORD PTR [rdi+16]
61 movq xmm5, MMWORD PTR [rdi+ rdx]
62 movq xmm3, MMWORD PTR [rdi+ rdx+8]
63 movq xmm2, MMWORD PTR [rdi+ rdx+16]
68 lea rdi, [rdi+rdx*2
    [all...]
vpx_subpixel_bilinear_sse2.asm 16 mov rdi, arg(2) ;output_ptr
50 movd xmm1, [rdi]
54 movd [rdi], xmm0
56 lea rdi, [rdi + rdx]
63 mov rdi, arg(2) ;output_ptr
94 movq xmm1, [rdi]
97 movq [rdi], xmm0 ;store the result
100 lea rdi, [rdi + rdx
    [all...]
vpx_subpixel_bilinear_ssse3.asm 16 mov rdi, arg(2) ;output_ptr
41 movd xmm1, [rdi]
44 movd [rdi], xmm0
46 lea rdi, [rdi + rdx]
53 mov rdi, arg(2) ;output_ptr
79 movq xmm1, [rdi]
82 movq [rdi], xmm0 ;store the result
85 lea rdi, [rdi + rdx
    [all...]
sad_ssse3.asm 17 lddqu xmm5, XMMWORD PTR [rdi]
18 lddqu xmm6, XMMWORD PTR [rdi+1]
19 lddqu xmm7, XMMWORD PTR [rdi+2]
26 lddqu xmm1, XMMWORD PTR [rdi]
27 lddqu xmm2, XMMWORD PTR [rdi+1]
28 lddqu xmm3, XMMWORD PTR [rdi+2]
39 lddqu xmm1, XMMWORD PTR [rdi+rdx]
40 lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
41 lddqu xmm3, XMMWORD PTR [rdi+rdx+2]
44 lea rdi, [rdi+rdx*2
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 17 movq xmm1, MMWORD PTR [rdi]
18 movq xmm3, MMWORD PTR [rdi+8]
19 movq xmm2, MMWORD PTR [rdi+16]
38 movq xmm5, MMWORD PTR [rdi]
39 movq xmm3, MMWORD PTR [rdi+8]
40 movq xmm2, MMWORD PTR [rdi+16]
61 movq xmm5, MMWORD PTR [rdi+ rdx]
62 movq xmm3, MMWORD PTR [rdi+ rdx+8]
63 movq xmm2, MMWORD PTR [rdi+ rdx+16]
68 lea rdi, [rdi+rdx*2
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
quantize_ssse3.asm 27 push rdi
31 push rdi
38 mov rdi, arg(0) ; BLOCK *b
42 mov rdi, rcx ; BLOCK *b
45 ;mov rdi, rdi ; BLOCK *b
50 mov rax, [rdi + vp8_block_coeff]
51 mov rcx, [rdi + vp8_block_round]
52 mov rdx, [rdi + vp8_block_quant_fast]
80 mov rdi, [rsi + vp8_blockd_dequant
    [all...]
subtract_mmx.asm 23 push rdi
27 mov rdi, arg(2) ;diff
39 movq [rdi], mm0
47 movq [rdi+rcx*2],mm0
55 movq [rdi+rcx*4], mm0
67 movq [rdi+rcx*2], mm0
70 pop rdi
84 push rdi
87 mov rdi, arg(0) ;diff
114 movq [rdi], mm
    [all...]
encodeopt.asm 21 push rdi
25 mov rdi, arg(1) ;dcoef_ptr
28 movdqa xmm1, [rdi]
31 movdqa xmm3, [rdi+16]
55 pop rdi
69 push rdi
76 mov rdi, arg(1) ;dcoef_ptr
79 movq mm4, [rdi]
82 movq mm6, [rdi+8]
100 movq mm4, [rdi+16
    [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ilp32/
x86-64-rep.d 10 0: f3 6c[ ]+rep insb \(%dx\),%es:\(%rdi\)
12 4: f3 a4[ ]+rep movsb %ds:\(%rsi\),%es:\(%rdi\)
14 8: f3 aa[ ]+rep stos %al,%es:\(%rdi\)
15 a: f3 a6[ ]+repz cmpsb %es:\(%rdi\),%ds:\(%rsi\)
16 c: f3 ae[ ]+repz scas %es:\(%rdi\),%al
17 e: 66 f3 6d[ ]+rep insw \(%dx\),%es:\(%rdi\)
19 14: 66 f3 a5[ ]+rep movsw %ds:\(%rsi\),%es:\(%rdi\)
21 1a: 66 f3 ab[ ]+rep stos %ax,%es:\(%rdi\)
22 1d: 66 f3 a7[ ]+repz cmpsw %es:\(%rdi\),%ds:\(%rsi\)
23 20: 66 f3 af[ ]+repz scas %es:\(%rdi\),%a
    [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-rep.d 9 0: f3 6c[ ]+rep insb \(%dx\),%es:\(%rdi\)
11 4: f3 a4[ ]+rep movsb %ds:\(%rsi\),%es:\(%rdi\)
13 8: f3 aa[ ]+rep stos %al,%es:\(%rdi\)
14 a: f3 a6[ ]+repz cmpsb %es:\(%rdi\),%ds:\(%rsi\)
15 c: f3 ae[ ]+repz scas %es:\(%rdi\),%al
16 e: 66 f3 6d[ ]+rep insw \(%dx\),%es:\(%rdi\)
18 14: 66 f3 a5[ ]+rep movsw %ds:\(%rsi\),%es:\(%rdi\)
20 1a: 66 f3 ab[ ]+rep stos %ax,%es:\(%rdi\)
21 1d: 66 f3 a7[ ]+repz cmpsw %es:\(%rdi\),%ds:\(%rsi\)
22 20: 66 f3 af[ ]+repz scas %es:\(%rdi\),%a
    [all...]
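Note: the two x86-64-rep.d entries (the ilp32 copy and the regular one expect the same disassembly) check that objdump prints the implicit operands of the string instructions, all of which use %es:(%rdi) as the destination or comparison operand. A minimal sketch of the underlying idiom, a rep movsb copy (SysV ABI, AT&T syntax):

        .text
        .globl  copy_rep
    copy_rep:                         # copy_rep(dst=%rdi, src=%rsi, n=%rdx)
        movq    %rdi, %rax            # return dst
        movq    %rdx, %rcx            # rep count lives in %rcx
        rep movsb                     # copies (%rsi) -> %es:(%rdi), %rcx bytes
        ret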
  /external/zlib/src/contrib/amd64/
amd64-match.S 60 * deflate_state structure) is passed in %rdi, hence our offsets below are
76 printf("#define dsWSize (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s)));
77 printf("#define dsWMask (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s)));
78 printf("#define dsWindow (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s)));
79 printf("#define dsPrev (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s)));
80 printf("#define dsMatchLen (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s)));
81 printf("#define dsPrevMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s)));
82 printf("#define dsStrStart (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s)));
83 printf("#define dsMatchStart (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s)));
84 printf("#define dsLookahead (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s)))
    [all...]
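Note: amd64-match.S states the key fact directly: the deflate_state pointer is the first SysV argument, so it sits in %rdi, and the printf lines are a small generator that turns each structure field into a "(offset)(%rdi)" assembler macro. Hypothetical usage of such a macro (offsets illustrative only, not zlib's real values):

    #define dsWMask      (72)(%rdi)   /* as emitted by the generator */
    #define dsStrStart  (108)(%rdi)

        movl    dsWMask, %edx         # edx = s->w_mask
        movl    dsStrStart, %ecx      # ecx = s->strstart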
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
recon_sse2.asm 26 push rdi
33 mov rdi, arg(2) ;dst;
41 movdqa [rdi], xmm0
44 movdqa [rdi+rcx], xmm1
45 movdqa [rdi+rcx*2],xmm2
47 lea rdi, [rdi+rcx*2]
50 add rdi, rcx
56 movdqa [rdi], xmm3
59 movdqa [rdi+rcx], xmm
    [all...]
sad_ssse3.asm 17 lddqu xmm5, XMMWORD PTR [rdi]
18 lddqu xmm6, XMMWORD PTR [rdi+1]
19 lddqu xmm7, XMMWORD PTR [rdi+2]
26 lddqu xmm1, XMMWORD PTR [rdi]
27 lddqu xmm2, XMMWORD PTR [rdi+1]
28 lddqu xmm3, XMMWORD PTR [rdi+2]
39 lddqu xmm1, XMMWORD PTR [rdi+rdx]
40 lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
41 lddqu xmm3, XMMWORD PTR [rdi+rdx+2]
44 lea rdi, [rdi+rdx*2
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_subpixel_bilinear_sse2.asm 16 mov rdi, arg(2) ;output_ptr
50 movd xmm1, [rdi]
54 movd [rdi], xmm0
56 lea rdi, [rdi + rdx]
63 mov rdi, arg(2) ;output_ptr
94 movq xmm1, [rdi]
97 movq [rdi], xmm0 ;store the result
100 lea rdi, [rdi + rdx
    [all...]
vp9_subpixel_bilinear_ssse3.asm 16 mov rdi, arg(2) ;output_ptr
41 movd xmm1, [rdi]
44 movd [rdi], xmm0
46 lea rdi, [rdi + rdx]
53 mov rdi, arg(2) ;output_ptr
79 movq xmm1, [rdi]
82 movq [rdi], xmm0 ;store the result
85 lea rdi, [rdi + rdx
    [all...]
  /external/boringssl/win-x86_64/crypto/bn/
x86_64-mont5.asm 14 mov QWORD[8+rsp],rdi ;WIN64 prologue
18 mov rdi,rcx
253 mov QWORD[r14*8+rdi],rax
265 mov rcx,QWORD[r14*8+rdi]
270 mov QWORD[r14*8+rdi],rsi
287 mov rdi,QWORD[8+rsp] ;WIN64 epilogue
294 mov QWORD[8+rsp],rdi ;WIN64 prologue
298 mov rdi,rcx
369 mov rdi,QWORD[8+rsp] ;WIN64 epilogue
420 mov QWORD[((56+8))+rsp],rdi
    [all...]
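Note: the x86_64-mont5.asm matches show the Win64 shim that perlasm wraps around code written for the SysV convention: rdi and rsi are callee-saved on Windows, so the prologue spills rdi to [8+rsp] (line 14) and moves the first Win64 argument, rcx, into rdi (line 18); the epilogue at line 287 restores it. Sketch of the shim in isolation (AT&T syntax for consistency with the other notes):

        movq    %rdi, 8(%rsp)        # WIN64 prologue: rdi is callee-saved here
        movq    %rcx, %rdi           # arg1: Win64 %rcx -> SysV %rdi
        # ... SysV-style body uses %rdi as the first argument ...
        movq    8(%rsp), %rdi        # WIN64 epilogue: restore rdi
        ret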
  /external/boringssl/linux-x86_64/crypto/rand/
rdrand-x86_64.S 18 movq %rcx,0(%rdi)
38 movq %rcx,0(%rdi)
39 addq %rdx,%rdi
  /external/boringssl/mac-x86_64/crypto/rand/
rdrand-x86_64.S 18 movq %rcx,0(%rdi)
38 movq %rcx,0(%rdi)
39 addq %rdx,%rdi
  /external/libvpx/libvpx/vp8/encoder/x86/
encodeopt.asm 21 push rdi
25 mov rdi, arg(1) ;dcoef_ptr
28 movdqa xmm1, [rdi]
31 movdqa xmm3, [rdi+16]
55 pop rdi
69 push rdi
76 mov rdi, arg(1) ;dcoef_ptr
79 movq mm4, [rdi]
82 movq mm6, [rdi+8]
100 movq mm4, [rdi+16
    [all...]
