    Searched refs: r8 (Results 1 - 25 of 752)


  /external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/
bswap64.asm 3 bswap r8
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_Copy_unsafe_s.S 29 LDR r8,[r0],r1
32 STR r8,[r2],r3
39 LDR r8,[r0],r1
43 LSR r8,r8,#8
44 ORR r8,r8,r9,LSL #24
47 STR r8,[r2],r3
49 LDR r8,[r0],r1
53 LSR r8,r8,#
    [all...]
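The LSR #8 / ORR ... LSL #24 pair above is the usual ARM idiom for copying from a source that is one byte off word alignment: each output word is stitched together from the top of one aligned load and the bottom of the next. A minimal C sketch of the same idea (hypothetical names, little-endian assumed):

    #include <stdint.h>

    /* Copy n words from a source misaligned by one byte: lo holds the
     * current aligned word, hi the next; each output word combines
     * lo >> 8 with hi << 24, as in the LSR/ORR pair above. */
    static void copy_misaligned_by_1(uint32_t *dst, const uint32_t *src_aligned, int n)
    {
        uint32_t lo = *src_aligned++;
        while (n--) {
            uint32_t hi = *src_aligned++;
            *dst++ = (lo >> 8) | (hi << 24);
            lo = hi;
        }
    }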
armVCM4P10_DecodeCoeffsToPair_s.S 40 LDRB r8,[r10,#1]
44 ORR r9,r9,r8,LSL #8
46 LSLS r8,r11,r12
48 AND r7,r7,r8,LSR #27
52 LDRB r8,[r10],#1
57 ORRCS r11,r8,r11,LSL #8
58 LSRS r8,r7,#1
60 LSLS r8,r11,r12
62 ADD r7,r7,r8,LSR #29
67 BIC r7,r8,#0xf00
    [all...]
  /external/linux-tools-perf/perf-3.12.0/arch/avr32/lib/
memcpy.S 31 3: ld.w r8, r11++
33 st.w r12++, r8
42 ld.ub r8, r11++
43 st.b r12++, r8
44 ld.ub r8, r11++
45 st.b r12++, r8
46 ld.ub r8, r11++
47 st.b r12++, r8
57 ld.ub r8, r11++
58 st.b r12++, r8
    [all...]
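The AVR32 memcpy above has the classic two-phase shape: a word loop (ld.w/st.w) for the bulk, then a byte tail (ld.ub/st.b) for the remainder. A C sketch of that structure, assuming word-aligned pointers (names are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    /* Word-at-a-time copy with a byte tail, mirroring the ld.w/st.w
     * loop and the ld.ub/st.b epilogue above. */
    static void copy_words_then_bytes(void *dst, const void *src, size_t n)
    {
        uint32_t *d = dst;
        const uint32_t *s = src;
        while (n >= 4) {            /* 3: ld.w r8, r11++ / st.w r12++, r8 */
            *d++ = *s++;
            n -= 4;
        }
        uint8_t *db = (uint8_t *)d; /* remaining 0-3 bytes */
        const uint8_t *sb = (const uint8_t *)s;
        while (n--)
            *db++ = *sb++;
    }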
memset.S 28 mov r8, r12
38 4: st.w r8++, r11
54 st.b r8++, r11
55 st.b r8++, r11
58 6: st.b r8++, r11
67 st.b r8++, r11
68 st.b r8++, r11
69 st.b r8++, r11
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
convolve_opt.s 45 MUL r8, r9, r10
54 MLA r8, r9, r10, r8
55 MLA r8, r12, r14, r8
60 MLA r8, r9, r10, r8
62 MLA r8, r12, r14, r8
68 ADD r5, r11, r8, LSL #
    [all...]
  /bionic/libc/arch-x86_64/string/
sse2-memcpy-slm.S 129 lea 64(%rdi), %r8
130 and $-64, %r8
140 cmp %r8, %rdx
144 cmp %r8, %rdx
151 prefetcht0 128(%r8, %rsi)
153 movdqu (%r8, %rsi), %xmm0
154 movdqu 16(%r8, %rsi), %xmm1
155 movdqu 32(%r8, %rsi), %xmm2
156 movdqu 48(%r8, %rsi), %xmm3
157 movdqa %xmm0, (%r8)
    [all...]
sse2-memmove-slm.S 165 lea 64(%rdi), %r8
166 and $-64, %r8 /* r8 now aligned to next 64 byte boundary */
169 movdqu (%r8, %rsi), %xmm4
170 movdqu 16(%r8, %rsi), %xmm5
171 movdqu 32(%r8, %rsi), %xmm6
172 movdqu 48(%r8, %rsi), %xmm7
178 movdqa %xmm4, (%r8)
179 movaps %xmm5, 16(%r8)
180 movaps %xmm6, 32(%r8)
    [all...]
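In both SSE2 routines above, lea 64(%rdi) followed by and $-64 rounds the destination up to the next 64-byte boundary so that stores can be aligned (movdqa/movaps) while loads stay unaligned (movdqu). The (%r8, %rsi) addressing suggests %rsi has been rebased to the src-dst offset. A hedged C sketch of one loop iteration under that assumption (not the full memcpy):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* One 64-byte iteration: align dst up (lea/and $-64), keep src as a
     * byte offset from it, prefetch ahead, then four unaligned loads and
     * four aligned 16-byte stores. */
    static void copy_one_cacheline(char *dst, const char *src)
    {
        char *d = (char *)(((uintptr_t)dst + 64) & ~(uintptr_t)63);
        ptrdiff_t off = (ptrdiff_t)((uintptr_t)src - (uintptr_t)dst); /* %rsi */
        _mm_prefetch(d + 128 + off, _MM_HINT_T0);
        __m128i x0 = _mm_loadu_si128((const __m128i *)(d + off));
        __m128i x1 = _mm_loadu_si128((const __m128i *)(d + off + 16));
        __m128i x2 = _mm_loadu_si128((const __m128i *)(d + off + 32));
        __m128i x3 = _mm_loadu_si128((const __m128i *)(d + off + 48));
        _mm_store_si128((__m128i *)d,        x0);
        _mm_store_si128((__m128i *)(d + 16), x1);
        _mm_store_si128((__m128i *)(d + 32), x2);
        _mm_store_si128((__m128i *)(d + 48), x3);
    }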
  /external/linux-tools-perf/perf-3.12.0/arch/arm/lib/
memset.S 34 * We need 2 extra registers for this loop - use r8 and the LR
36 stmfd sp!, {r8, lr}
37 mov r8, r1
41 stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
42 stmgeia ip!, {r1, r3, r8, lr}
43 stmgeia ip!, {r1, r3, r8, lr}
44 stmgeia ip!, {r1, r3, r8, lr}
46 ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
51 stmneia ip!, {r1, r3, r8, lr}
52 stmneia ip!, {r1, r3, r8, lr
    [all...]
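As the comment at line 34 says, the ARM memset borrows r8 and LR so that four registers (r1, r3, r8, lr) all hold the fill pattern; each stmgeia then writes 16 bytes, and four of them cover 64 bytes per loop. A C sketch of that layout, assuming an aligned destination and a length that is a multiple of 64:

    #include <stddef.h>
    #include <stdint.h>

    /* 64-bytes-at-a-time fill: one loop body stands in for the four
     * stmgeia ip!, {r1, r3, r8, lr} instructions above. */
    static void fill64(uint32_t *dst, uint8_t byte, size_t n)
    {
        uint32_t v = byte * 0x01010101u;   /* replicate byte into a word */
        while (n >= 64) {
            for (int i = 0; i < 16; i++)   /* 16 words = 64 bytes */
                dst[i] = v;
            dst += 16;
            n -= 64;
        }
    }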
  /hardware/samsung_slsi/exynos5/libswconverter/
csc_tiled_to_linear_uv_neon.s 62 @r8 src_offset
80 mul r8, r11, r5 @ src_offset = tiled_width * i
82 add r8, r1, r8 @ src_offset = y_src + src_offset
84 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
85 vld1.8 {q0, q1}, [r8]!
87 vld1.8 {q2, q3}, [r8]!
89 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
90 vld1.8 {q4, q5}, [r8]!
92 vld1.8 {q6, q7}, [r8]!
    [all...]
csc_tiled_to_linear_y_neon.s 63 @r8 src_offset
81 mul r8, r11, r5 @ src_offset = tiled_width * i
83 add r8, r1, r8 @ src_offset = y_src + src_offset
85 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
86 vld1.8 {q0, q1}, [r8]!
87 vld1.8 {q2, q3}, [r8]!
88 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
89 vld1.8 {q4, q5}, [r8]!
90 vld1.8 {q6, q7}, [r8]!
    [all...]
csc_tiled_to_linear_uv_deinterleave_neon.s 65 @r8 src_offset
83 mul r8, r11, r5 @ src_offset = tiled_width * i
85 add r8, r2, r8 @ src_offset = uv_src + src_offset
90 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
91 vld2.8 {q0, q1}, [r8]!
93 vld2.8 {q2, q3}, [r8]!
95 pld [r8, #(CACHE_LINE_SIZE*PRE_LOAD_OFFSET)]
96 vld2.8 {q4, q5}, [r8]!
98 vld2.8 {q6, q7}, [r8]!
    [all...]
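All three csc_tiled_to_linear_* variants above compute the same source address: src_offset = tiled_width * i, rebased onto the source plane (the mul/add pair), then stream rows out with vld1/vst1. A C sketch of one row, with the row width left as a parameter since the NEON code moves q0-q7 (128 bytes) per iteration:

    #include <stdint.h>
    #include <string.h>

    /* Copy one row out of a tiled source plane, following the address
     * computation in the comments above. Names are illustrative. */
    static void tiled_row_to_linear(uint8_t *dst, const uint8_t *src,
                                    int tiled_width, int i, size_t row_bytes)
    {
        const uint8_t *p = src + (size_t)tiled_width * i; /* mul + add */
        memcpy(dst, p, row_bytes);                        /* vld1/vst1 pairs */
    }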
  /external/libhevc/common/arm/
ihevc_intra_pred_luma_mode_18_34.s 129 mov r8,r0
133 vld1.8 {d0},[r8],r6
135 vld1.8 {d1},[r8],r6
137 vld1.8 {d2},[r8],r6
138 vld1.8 {d3},[r8],r6
140 vld1.8 {d4},[r8],r6
141 vld1.8 {d5},[r8],r6
142 vld1.8 {d6},[r8],r6
144 vld1.8 {d7},[r8],r6
152 movne r8,r
    [all...]
ihevc_intra_pred_chroma_mode_18_34.s 128 mov r8,r0
134 vld1.8 {d0,d1},[r8],r6
136 vld1.8 {d2,d3},[r8],r6
138 vld1.8 {d4,d5},[r8],r6
140 vld1.8 {d6,d7},[r8],r6
142 vld1.8 {d8,d9},[r8],r6
144 vld1.8 {d10,d11},[r8],r6
146 vld1.8 {d12,d13},[r8],r6
148 vld1.8 {d14,d15},[r8],r6
155 add r8,r0,#1
    [all...]
  /external/chromium_org/third_party/openssl/openssl/crypto/bn/asm/
ppc.pl 231 #.set r8,8
275 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
278 # r7,r8 are the results of the 32x32 giving 64 bit multiply.
298 $UMULH r8,r5,r6
300 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
301 adde r8,r8,r8
306 addze r11,r8 # r8 added to r11 which is 0
    [all...]
  /external/openssl/crypto/bn/asm/
ppc.pl 231 #.set r8,8
275 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
278 # r7,r8 are the results of the 32x32 giving 64 bit multiply.
298 $UMULH r8,r5,r6
300 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
301 adde r8,r8,r8
306 addze r11,r8 # r8 added to r11 which is 0
    [all...]
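The two ppc.pl hits are the same file in two trees. Per its comments, (r7,r8) holds a 64-bit product from a 32x32 multiply (UMULH supplies the high word), and the addc/adde pair doubles that double-width value by feeding the low half's carry into the high half. A C sketch of that carry chain on 32-bit halves:

    #include <stdint.h>

    /* Double a 64-bit value held as two 32-bit halves, as in
     * addc r7,r7,r7 / adde r8,r8,r8 above. A further carry out of the
     * high half would go to addze, not modelled here. */
    static void double_64(uint32_t *lo, uint32_t *hi)
    {
        uint32_t new_lo = *lo + *lo;       /* addc: may carry out */
        uint32_t carry  = new_lo < *lo;    /* recover the carry flag */
        *hi = *hi + *hi + carry;           /* adde: add with carry in */
        *lo = new_lo;
    }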
  /frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/
band_nrg_v5.s 54 smull r6, r8, r6, r6
60 qadd r14, r14, r8
61 smull r6, r8, r6, r6
65 qadd r14, r14, r8
113 ldr r8, [r0, +r10, lsl #2]
115 mov r8, r8, asr #1
120 add r5, r8, r9
122 sub r8, r8, r
    [all...]
CalcWindowEnergy_v5.s 42 mov r8, #0 @ w=0
50 str r8, [r13, #4]
61 smull r0, r8, r12, r11 @ accu2 = fixmul( Coeff0, states1 );
64 mov r8, r8, lsl #1
67 sub r8, r0, r8 @ out = accu3 - accu2;
70 mov r11, r8 @ states1 = out;
73 mov r8, r8, asr #1
    [all...]
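The smull / lsl #1 sequence above is a Q31 fractional multiply: take the high word of the 64-bit product and double it, i.e. (a*b) >> 31 with the lowest bit dropped; the result then feeds the accu3 - accu2 filter update. A hedged C sketch (the library's actual fixmul() may round differently):

    #include <stdint.h>

    /* Q31 multiply as in smull r0, r8, r12, r11 followed by
     * mov r8, r8, lsl #1: high word of the product, doubled. */
    static int32_t fixmul_q31(int32_t a, int32_t b)
    {
        int64_t p = (int64_t)a * b;        /* smull: 32x32 -> 64 */
        return (int32_t)((p >> 32) << 1);  /* high word, lsl #1 */
    }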
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
residu_asm_neon.s 43 MOV r8, r9
44 VLD1.S16 D5, [r8]! @get x[i], x[i+1], x[i+2], x[i+3]
47 SUB r8, r9, #2 @get the x[i-1] address
48 VLD1.S16 D5, [r8]!
51 SUB r8, r9, #4 @load the x[i-2] address
52 VLD1.S16 D5, [r8]!
55 SUB r8, r9, #6 @load the x[i-3] address
56 VLD1.S16 D5, [r8]!
59 SUB r8, r9, #8 @load the x[i-4] address
60 VLD1.S16 D5, [r8]!
    [all...]
convolve_neon.s 39 @MOV r8, #0 @ s = 0
47 MUL r8, r9, r10
64 ADD r5, r5, r8
71 @MOV r8, #0
80 MUL r8, r9, r10
82 MLA r8, r12, r14, r8
100 ADD r8, r8, r5
101 ADD r8, r11, r8, LSL #
    [all...]
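Both convolve_opt.s and convolve_neon.s build each output sample the same way: one MUL to start the accumulator, then a chain of MLAs. A C sketch of that inner product (the Q15 types are an assumption based on the AMR-WB context):

    #include <stdint.h>

    /* One convolution point: s = x[0]*h[n-1] + x[1]*h[n-2] + ...,
     * i.e. the MUL r8, r9, r10 followed by MLA r8, ..., r8 above. */
    static int32_t convolve_point(const int16_t *x, const int16_t *h, int n)
    {
        int32_t s = (int32_t)x[0] * h[n - 1];      /* MUL  */
        for (int k = 1; k < n; k++)
            s += (int32_t)x[k] * h[n - 1 - k];     /* MLAs */
        return s;
    }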
  /system/core/libpixelflinger/
col32cb16blend.S 55 ldrh r8, [r0] // load dest pixel
57 mov r6, r8, lsr #11 // extract dest red
58 and r7, r9, r8, lsr #5 // extract dest green
59 and r8, r8, #0x1f // extract dest blue
63 smlabb r8, r8, r5, r4 // dest blue * alpha + src blue
69 orr r6, r8, lsr #8 // shift blue into 565
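The col32cb16blend fragment above unpacks an RGB565 destination pixel (red = pix >> 11, green = (pix >> 5) masked, blue = pix & 0x1f), blends each channel against the source with smlabb, and repacks with orr. A simplified C sketch of that unpack/blend/repack shape; the per-channel scaling here is a conventional alpha blend and not a faithful copy of the asm's fixed-point arithmetic:

    #include <stdint.h>

    /* Blend an 8-bit-per-channel source over an RGB565 destination. */
    static uint16_t blend_565(uint16_t dst, uint8_t r, uint8_t g, uint8_t b,
                              uint8_t alpha)
    {
        uint32_t dr = dst >> 11;          /* extract dest red   (5 bits) */
        uint32_t dg = (dst >> 5) & 0x3f;  /* extract dest green (6 bits) */
        uint32_t db = dst & 0x1f;         /* extract dest blue  (5 bits) */
        dr = (dr * (255 - alpha) + (r >> 3) * alpha) / 255;
        dg = (dg * (255 - alpha) + (g >> 2) * alpha) / 255;
        db = (db * (255 - alpha) + (b >> 3) * alpha) / 255;
        return (uint16_t)((dr << 11) | (dg << 5) | db);
    }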
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
    [all...]
  /external/libvpx/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
    [all...]
  /frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/
idct.cpp 22 This file contains the functions that transform an 8x8 image block from
44 coefficients of type int for an 8x8 image block;
57 blk points to the found IDCT values for an 8x8 image block.
68 This function transforms an 8x8 image block from dequantized DCT coefficients
82 1. Find horizontal 1-D IDCT values for each row from 8x8 dequantized DCT
131 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ local
182 r8 = W7 * (r4 + r5);
183 r4 = (r8 + (W1 - W7) * r4);
186 r5 = (r8 - (W1 + W7) * r5);
187 r8 = W3 * (r6 + r7)
353 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ local
    [all...]
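The butterfly at lines 182-187 above is the standard three-multiply rotation: sharing t = W7*(a+b) lets W1*a + W7*b and W7*a - W1*b be formed with three multiplies instead of four. A C sketch using the usual scaled-cosine IDCT constants:

    #include <stdint.h>

    #define W1 2841  /* 2048*sqrt(2)*cos(1*pi/16) */
    #define W7 565   /* 2048*sqrt(2)*cos(7*pi/16) */

    /* r8 = W7*(r4 + r5); r4 = r8 + (W1 - W7)*r4; r5 = r8 - (W1 + W7)*r5 */
    static void rotate_w1_w7(int32_t *a, int32_t *b)
    {
        int32_t t  = W7 * (*a + *b);      /* shared product */
        int32_t na = t + (W1 - W7) * *a;  /* = W1*a + W7*b */
        int32_t nb = t - (W1 + W7) * *b;  /* = W7*a - W1*b */
        *a = na;
        *b = nb;
    }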

