    Searched refs: r8 (Results 1 - 25 of 535)


  /external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/
bswap64.asm 3 bswap r8
mem64-err.asm 7 mov ah, [r8]
  /external/linux-tools-perf/perf-3.12.0/arch/avr32/lib/
memcpy.S 31 3: ld.w r8, r11++
33 st.w r12++, r8
42 ld.ub r8, r11++
43 st.b r12++, r8
44 ld.ub r8, r11++
45 st.b r12++, r8
46 ld.ub r8, r11++
47 st.b r12++, r8
57 ld.ub r8, r11++
58 st.b r12++, r8
    [all...]
memset.S 28 mov r8, r12
38 4: st.w r8++, r11
54 st.b r8++, r11
55 st.b r8++, r11
58 6: st.b r8++, r11
67 st.b r8++, r11
68 st.b r8++, r11
69 st.b r8++, r11
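
Note: both AVR32 routines above use the usual split of a word-at-a-time main loop (ld.w/st.w) and a byte tail (ld.ub/st.b). A minimal C sketch of that structure, with illustrative names and alignment handling omitted:

#include <stddef.h>
#include <stdint.h>

/* Word loop plus byte tail, mirroring the ld.w/st.w then
   ld.ub/st.b structure of the AVR32 memcpy hits above. */
static void *copy_sketch(void *dst, const void *src, size_t n)
{
    uint32_t *d32 = dst;
    const uint32_t *s32 = src;
    while (n >= 4) {                 /* ld.w r8, r11++ / st.w r12++, r8 */
        *d32++ = *s32++;
        n -= 4;
    }
    uint8_t *d8 = (uint8_t *)d32;
    const uint8_t *s8 = (const uint8_t *)s32;
    while (n--)                      /* ld.ub r8, r11++ / st.b r12++, r8 */
        *d8++ = *s8++;
    return dst;
}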
  /bionic/libc/arch-x86_64/string/
sse2-memcpy-slm.S 129 lea 64(%rdi), %r8
130 and $-64, %r8
140 cmp %r8, %rdx
144 cmp %r8, %rdx
151 prefetcht0 128(%r8, %rsi)
153 movdqu (%r8, %rsi), %xmm0
154 movdqu 16(%r8, %rsi), %xmm1
155 movdqu 32(%r8, %rsi), %xmm2
156 movdqu 48(%r8, %rsi), %xmm3
157 movdqa %xmm0, (%r8)
    [all...]
sse2-memmove-slm.S 165 lea 64(%rdi), %r8
166 and $-64, %r8 /* r8 now aligned to next 64 byte boundary */
169 movdqu (%r8, %rsi), %xmm4
170 movdqu 16(%r8, %rsi), %xmm5
171 movdqu 32(%r8, %rsi), %xmm6
172 movdqu 48(%r8, %rsi), %xmm7
178 movdqa %xmm4, (%r8)
179 movaps %xmm5, 16(%r8)
180 movaps %xmm6, 32(%r8)
    [all...]
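
Note: the lea 64(%rdi), %r8 / and $-64, %r8 pair in both bionic SLM routines computes the next 64-byte boundary strictly above %rdi, exactly as the /* r8 now aligned to next 64 byte boundary */ comment says. The same arithmetic in C (a sketch, not taken from bionic):

#include <stdint.h>

/* Next 64-byte boundary above p, as in lea 64(%rdi), %r8 followed
   by and $-64, %r8: add 64 first, then clear the low six bits, so
   an already-aligned pointer still advances by a full 64 bytes. */
static uintptr_t align_up_64(uintptr_t p)
{
    return (p + 64) & ~(uintptr_t)63;
}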
sse2-strcat-slm.S 68 mov %rdx, %r8
83 test %r8, %r8
  /external/linux-tools-perf/perf-3.12.0/arch/arm/lib/
memset.S 34 * We need 2 extra registers for this loop - use r8 and the LR
36 stmfd sp!, {r8, lr}
37 mov r8, r1
41 stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
42 stmgeia ip!, {r1, r3, r8, lr}
43 stmgeia ip!, {r1, r3, r8, lr}
44 stmgeia ip!, {r1, r3, r8, lr}
46 ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
51 stmneia ip!, {r1, r3, r8, lr}
52 stmneia ip!, {r1, r3, r8, lr}
    [all...]
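
Note: the loop above keeps the fill value in r1, r3, r8 and lr and issues four stmgeia stores of four registers each, i.e. 64 bytes per iteration, as the comment says. A C sketch of the same unrolling (illustrative names; assumes a word-aligned destination and a multiple of 64 bytes):

#include <stddef.h>
#include <stdint.h>

/* 64 bytes per iteration: four stores of four words each, like the
   stmgeia ip!, {r1, r3, r8, lr} sequence above. */
static void fill64_sketch(uint32_t *dst, uint32_t v, size_t nbytes)
{
    for (size_t i = 0; i < nbytes / 64; i++)
        for (int j = 0; j < 16; j++)     /* 16 words == 64 bytes */
            *dst++ = v;
}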
  /external/libhevc/common/arm/
ihevc_intra_pred_luma_mode_18_34.s 129 mov r8,r0
133 vld1.8 {d0},[r8],r6
135 vld1.8 {d1},[r8],r6
137 vld1.8 {d2},[r8],r6
138 vld1.8 {d3},[r8],r6
140 vld1.8 {d4},[r8],r6
141 vld1.8 {d5},[r8],r6
142 vld1.8 {d6},[r8],r6
144 vld1.8 {d7},[r8],r6
152 movne r8,r
    [all...]
ihevc_intra_pred_chroma_mode_18_34.s 128 mov r8,r0
134 vld1.8 {d0,d1},[r8],r6
136 vld1.8 {d2,d3},[r8],r6
138 vld1.8 {d4,d5},[r8],r6
140 vld1.8 {d6,d7},[r8],r6
142 vld1.8 {d8,d9},[r8],r6
144 vld1.8 {d10,d11},[r8],r6
146 vld1.8 {d12,d13},[r8],r6
148 vld1.8 {d14,d15},[r8],r6
155 add r8,r0,#1
    [all...]
ihevc_intra_pred_luma_mode2.s 110 mov r8,#-2
122 vld1.8 {d0},[r0],r8
125 vld1.8 {d1},[r10],r8
128 vld1.8 {d2},[r0],r8
129 vld1.8 {d3},[r10],r8
132 vld1.8 {d4},[r0],r8
133 vld1.8 {d5},[r10],r8
134 vld1.8 {d6},[r0],r8
137 vld1.8 {d7},[r10],r8
178 vld1.8 {d0},[r0],r8
    [all...]
  /external/openssl/crypto/bn/asm/
ppc.pl 231 #.set r8,8
275 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
278 # r7,r8 are the results of the 32x32 giving 64 bit multiply.
298 $UMULH r8,r5,r6
300 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
301 adde r8,r8,r8
306 addze r11,r8 # r8 added to r11 which is 0
    [all...]
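
Note: the ppc.pl comments spell the squaring scheme out: r7/r8 hold the low and high words of a 32x32 -> 64-bit product ($UMULH gives the high word), and addc/adde double that pair with carry, since each cross product a_i*a_j occurs twice in a square. One such step in portable C (a sketch, using 64-bit host arithmetic instead of the carry chain):

#include <stdint.h>

/* One cross-product step: 32x32 -> 64-bit multiply, then double with
   carry, as addc r7,r7,r7 / adde r8,r8,r8 do on the (r7,r8) pair. */
static void sqr_cross_term(uint32_t a, uint32_t b,
                           uint32_t *lo, uint32_t *hi, uint32_t *carry)
{
    uint64_t p = (uint64_t)a * b;        /* r7 = low word, r8 = $UMULH */
    *lo = (uint32_t)(p << 1);
    *hi = (uint32_t)((p << 1) >> 32);
    *carry = (uint32_t)(p >> 63);        /* bit shifted out by doubling */
}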
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
    [all...]
loopfilter_v6.asm 92 uqsub8 r8, r10, r11 ; p2 - p1
96 orr r8, r8, r10 ; abs (p2-p1)
98 uqsub8 r8, r8, r2 ; compare to limit
100 orr lr, lr, r8
106 uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later
159 orr r10, r6, r8 ; calculate vp8_hevmask
167 ldr r8, [src], pstep ; p
    [all...]
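
Note: the uqsub8/orr pattern above is the standard branch-free per-byte absolute difference: unsigned saturating subtraction clamps at zero, so sat(p2-p1) is zero whenever p2 <= p1, and OR-ing it with sat(p1-p2) yields abs(p2-p1) in every byte lane. One lane of that identity in C (illustrative):

#include <stdint.h>

/* Saturating byte subtract, one lane of uqsub8. */
static uint8_t usat_sub(uint8_t a, uint8_t b)
{
    return a > b ? (uint8_t)(a - b) : 0;
}

/* abs(a-b) as in uqsub8 r8, r10, r11 ... orr r8, r8, r10 above:
   one of the two saturating differences is always zero. */
static uint8_t abs_diff(uint8_t a, uint8_t b)
{
    return (uint8_t)(usat_sub(a, b) | usat_sub(b, a));
}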
  /external/libvpx/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
    [all...]
loopfilter_v6.asm 92 uqsub8 r8, r10, r11 ; p2 - p1
96 orr r8, r8, r10 ; abs (p2-p1)
98 uqsub8 r8, r8, r2 ; compare to limit
100 orr lr, lr, r8
106 uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later
159 orr r10, r6, r8 ; calculate vp8_hevmask
167 ldr r8, [src], pstep ; p
    [all...]
  /external/aac/libFDK/src/arm/
dct_arm.cpp 109 r8 val_tw
117 LDR r8, [r1], #4 // val_tw = *twiddle++;
121 SMULWT r9, r5, r8 // accuX = accu2*val_tw.l
122 SMULWB r5, r5, r8 // accu2 = accu2*val_tw.h
124 SMLAWT r5, r4, r8, r5 // accu2 = accu2*val_tw.h + accu1*val_tw.l
125 SMLAWB r4, r4, r8, r9 // accu1 = accu1*val_tw.h - accu2*val_tw.l
127 LDR r8, [r1], #4 // val_tw = *twiddle++;
131 SMULWB r9, r7, r8 // accuX = accu4*val_tw.h
132 SMULWT r7, r7, r8 // accu4 = accu4*val_tw.l
134 SMLAWB r7, r6, r8, r7 // accu4 = accu4*val_tw.l+accu3*val_tw.h
135 SMLAWT r6, r6, r8, r9 // accu3 = accu3*val_tw.l-accu4*val_tw.h
150 SMLAWT r5, r4, r8, r5 // accu2 = accu2*val_tw.h + accu1*val_tw.l
151 SMLAWB r4, r4, r8, r9 // accu1 = accu1*val_tw.h - accu2*val_tw.l
153 LDR r8, [r1], #4 // val_tw = *twiddle++;
160 SMLAWB r7, r6, r8, r7 // accu4 = accu4*val_tw.l+accu3*val_tw.h
161 SMLAWT r6, r6, r8, r9 // accu3 = accu3*val_tw.l-accu4*val_tw.h
294 LDR r8, [r1], #4 // val_tw = *twiddle++
301 SMLAWB r9, r4, r8, r9 // accuX = accu1*val_tw.h-(-accu2)*val_tw.l
304 SMLAWB r5, r5, r8, r4 // accu2 = (-accu2)*val_tw.t+accu1*val_tw.l
305 LDR r8, [r1], #4 // val_tw = *twiddle++
312 SMLAWT r6, r6, r8, r5 // accu3 = (-accu4)*val_tw.l-(-accu3)*val_tw.h
313 SMLAWT r7, r7, r8, r4 // accu4 = (-accu3)*val_tw.l+(-accu4)*val_tw.h
317 LDR r8, [r1], #4 // val_tw = *twiddle++
324 SMLAWB r9, r4, r8, r9 // accuX = accu1*val_tw.h-(-accu2)*val_tw.l
327 SMLAWB r5, r5, r8, r4 // accu2 = (-accu2)*val_tw.t+accu1*val_tw.l
328 LDR r8, [r1], #4 // val_tw = *twiddle++
335 SMLAWT r6, r6, r8, r5 // accu3 = (-accu4)*val_tw.l-(-accu3)*val_tw.h
336 SMLAWT r7, r7, r8, r4 // accu4 = (-accu3)*val_tw.l+(-accu4)*val_tw.h
    [all...]
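
Note: the SMULW/SMLAW forms used throughout this block multiply a 32-bit value by one signed 16-bit half of val_tw and keep the upper 32 bits of the 48-bit product, which is how the accu2 = accu2*val_tw.h + accu1*val_tw.l rotations in the comments are built. A C model of the instruction semantics (not of the FDK source itself):

#include <stdint.h>

/* SMULWB rd, rn, rm: (rn * bottom16(rm)) >> 16, keeping the high
   32 bits of the 48-bit product. */
static int32_t smulwb(int32_t rn, int32_t rm)
{
    return (int32_t)(((int64_t)rn * (int16_t)rm) >> 16);
}

/* SMULWT: same, using the top 16 bits of rm. */
static int32_t smulwt(int32_t rn, int32_t rm)
{
    return (int32_t)(((int64_t)rn * (int16_t)(rm >> 16)) >> 16);
}

/* SMLAWT rd, rn, rm, ra: accumulate on top, as in
   SMLAWT r5, r4, r8, r5 above. */
static int32_t smlawt(int32_t rn, int32_t rm, int32_t ra)
{
    return smulwt(rn, rm) + ra;
}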
  /external/jpeg/
armv6_idct.S 36 * 16-bit constants; r12 and r14 hold two of the four arguments; and r8 hold
38 * first pass, r8 to r11 are used to hold quantization values, so the loop
77 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
89 ldmdb r12!, {r8, r9, r10, r11}
102 mul r4, r8, r4
106 orreqs r8, r1, r2
107 orreqs r8, r3, r5
108 orreqs r8, r6, r7
121 ldmdb r12!, {r8, r9, r10, r11}
132 mul r7, r8, r
    [all...]
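
Note: per the header comment, r8 to r11 hold quantization values in the first pass, so the mul r4, r8, r4 lines dequantize coefficients in place; the orreqs chain appears to OR the remaining terms while the flags are live, the usual all-zeros shortcut test. A hedged C sketch of that pattern (names illustrative; the shortcut reading is an assumption):

#include <stdint.h>

/* Dequantize one row and report whether every AC term is zero,
   modeling mul r4, r8, r4 and the orreqs r8, r1, r2 ... chain. */
static int dequant_row_all_ac_zero(const int16_t *coef,
                                   const int16_t *quant, int32_t *out)
{
    int32_t acc = 0;
    for (int i = 0; i < 8; i++) {
        out[i] = (int32_t)coef[i] * quant[i];
        if (i > 0)
            acc |= out[i];
    }
    return acc == 0;      /* nonzero: only the DC term survives */
}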
  /external/chromium_org/third_party/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S 15 movzbq 15(%rdi),%r8
23 movq 8(%rsi,%rax,1),%r8
26 movq %r8,%rdx
31 shrq $4,%r8
36 xorq 8(%rsi,%rbx,1),%r8
41 movq %r8,%rdx
43 xorq %r10,%r8
47 shrq $4,%r8
51 xorq 8(%rsi,%rax,1),%r8
56 movq %r8,%rdx
    [all...]
  /external/chromium_org/third_party/boringssl/mac-x86_64/crypto/modes/
ghash-x86_64.S 15 movzbq 15(%rdi),%r8
23 movq 8(%rsi,%rax,1),%r8
26 movq %r8,%rdx
31 shrq $4,%r8
36 xorq 8(%rsi,%rbx,1),%r8
41 movq %r8,%rdx
43 xorq %r10,%r8
47 shrq $4,%r8
51 xorq 8(%rsi,%rax,1),%r8
56 movq %r8,%rdx
    [all...]
  /external/openssl/crypto/modes/asm/
ghash-x86_64.S 12 movzbq 15(%rdi),%r8
20 movq 8(%rsi,%rax,1),%r8
23 movq %r8,%rdx
28 shrq $4,%r8
33 xorq 8(%rsi,%rbx,1),%r8
38 movq %r8,%rdx
40 xorq %r10,%r8
44 shrq $4,%r8
48 xorq 8(%rsi,%rax,1),%r8
53 movq %r8,%rdx
    [all...]
  /external/libunwind/tests/
flush-cache.S 15 add r8=31,in1 // round up to 32 byte-boundary
17 shr.u r8=r8,5 // we flush 32 bytes per iteration
19 add r8=-1,r8
25 mov ar.lc=r8
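
Note: the flush loop computes its trip count exactly as the comments say: round the byte count up to a 32-byte boundary, divide by 32, and subtract one, because the ia64 loop counter ar.lc runs the body count+1 times. The same arithmetic in C (a sketch):

#include <stddef.h>

/* Trip count for a 32-bytes-per-iteration flush, matching
   add r8=31,in1 / shr.u r8=r8,5 / add r8=-1,r8 above. */
static size_t flush_loop_count(size_t len)
{
    return ((len + 31) >> 5) - 1;     /* ar.lc executes count+1 times */
}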
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 28 push {r4-r8, lr}
38 sub r8, r0, r1, lsl #3 ; move src pointer down by 8 lines
40 vld1.u8 {d0}, [r8@64], r1 ; p7
41 vld1.u8 {d1}, [r8@64], r1 ; p6
42 vld1.u8 {d2}, [r8@64], r1 ; p5
43 vld1.u8 {d3}, [r8@64], r1 ; p4
44 vld1.u8 {d4}, [r8@64], r1 ; p3
45 vld1.u8 {d5}, [r8@64], r1 ; p2
46 vld1.u8 {d6}, [r8@64], r1 ; p1
47 vld1.u8 {d7}, [r8@64], r1 ; p0
    [all...]
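
Note: sub r8, r0, r1, lsl #3 rewinds the source pointer by eight rows (stride << 3), after which the vld1.u8 loads walk forward through p7 down to p0. The equivalent pointer arithmetic in C (illustrative; rows[] is a hypothetical output array):

#include <stdint.h>

/* Rewind src by 8 lines and collect row pointers p7..p0, as the
   sub r8, r0, r1, lsl #3 / vld1.u8 {dN}, [r8@64], r1 sequence does. */
static void collect_p_rows(const uint8_t *src, int stride,
                           const uint8_t *rows[8])
{
    const uint8_t *p = src - 8 * stride;   /* r8 = r0 - (r1 << 3) */
    for (int i = 0; i < 8; i++) {
        rows[i] = p;                       /* p7 first, p0 last */
        p += stride;
    }
}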
  /external/libvpx/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 28 push {r4-r8, lr}
38 sub r8, r0, r1, lsl #3 ; move src pointer down by 8 lines
40 vld1.u8 {d0}, [r8@64], r1 ; p7
41 vld1.u8 {d1}, [r8@64], r1 ; p6
42 vld1.u8 {d2}, [r8@64], r1 ; p5
43 vld1.u8 {d3}, [r8@64], r1 ; p4
44 vld1.u8 {d4}, [r8@64], r1 ; p3
45 vld1.u8 {d5}, [r8@64], r1 ; p2
46 vld1.u8 {d6}, [r8@64], r1 ; p1
47 vld1.u8 {d7}, [r8@64], r1 ; p0
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/arm/armv6/
vp8_subtract_armv6.asm 38 ldr r8, [r1, #vp8_blockd_predictor]
45 ldr r1, [r8], r2 ; pred
93 uxtb16 r8, r6 ; [s2 | s0] (A)
98 usub16 r6, r8, r9 ; [d2 | d0] (A)
104 pkhbt r8, r6, r7, lsl #16 ; [d1 | d0] (A)
107 str r8, [r0], #4 ; diff (A)
108 uxtb16 r8, r10 ; [s2 | s0] (B)
115 usub16 r6, r8, r9 ; [d2 | d0] (B)
121 pkhbt r8, r6, r7, lsl #16 ; [d1 | d0] (B)
124 str r8, [r0], #4 ; diff (B)
    [all...]
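
Note: the subtract kernel above is SIMD-within-a-register: uxtb16 zero-extends bytes 0 and 2 of a word into two 16-bit lanes (the [s2 | s0] of the comments), usub16 subtracts the lanes independently, and pkhbt ..., lsl #16 packs two halfwords back into one word ([d1 | d0]). A C model of the three primitives (instruction semantics only, not the vp8 source):

#include <stdint.h>

/* uxtb16 rd, rn: bytes 0 and 2 zero-extended into 16-bit lanes. */
static uint32_t uxtb16(uint32_t x)
{
    return x & 0x00ff00ffu;
}

/* usub16 rd, rn, rm: independent 16-bit lane subtraction. */
static uint32_t usub16(uint32_t a, uint32_t b)
{
    uint32_t lo = (uint16_t)(a - b);
    uint32_t hi = (uint16_t)((a >> 16) - (b >> 16));
    return lo | (hi << 16);
}

/* pkhbt rd, rn, rm, lsl #16: bottom half from rn, top from rm. */
static uint32_t pkhbt_lsl16(uint32_t rn, uint32_t rm)
{
    return (rn & 0xffffu) | (rm << 16);
}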
