    Searched refs:r8 (Results 51 - 75 of 298)


  /frameworks/base/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
omxVCM4P10_DeblockLuma_I.S 21 MOV r8,r3
29 CMPNE r8,#0
45 MOV r3,r8
57 ADD r3,r8,#2
  /external/tremolo/Tremolo/
mdctARM.s 187 LDMFD r12,{r8,r9,r10} @ r8 = step
191 MOV r8, r8, LSL #1
208 STRH r6, [r0], r8
226 LDMFD r12,{r8,r9,r10} @ r8 = step
230 MOV r8, r8, LSL #1
247 STRH r6, [r0], r8
dpen.s 55 LDMFD r13!,{r4-r8,r10,PC}
68 STMFD r13!,{r4-r8,r10,r14}
99 ADC r2, r6, r7, LSL #1 @ r8 = &t[chase*2+C]
112 LDMFD r13!,{r4-r8,r10,PC}
124 ADC r8, r6, r7 @ r8 = t+chase+bit
125 LDRB r10,[r8], -r6 @ r10= next=t[chase+bit] r8=chase+bit
138 CMP r8, r7 @ if bit==0 (chase+bit==chase) (sets C)
141 ADC r12,r8, r6 @ r12= chase+bit+1+
  /external/valgrind/main/none/tests/x86/
insn_basic.def 27 adcb eflags[0x1,0x0] : r8.ub[12] r8.ub[34] => 1.ub[46]
28 adcb eflags[0x1,0x1] : r8.ub[12] r8.ub[34] => 1.ub[47]
29 adcb eflags[0x1,0x0] : r8.ub[12] m8.ub[34] => 1.ub[46]
30 adcb eflags[0x1,0x1] : r8.ub[12] m8.ub[34] => 1.ub[47]
31 adcb eflags[0x1,0x0] : m8.ub[12] r8.ub[34] => 1.ub[46]
32 adcb eflags[0x1,0x1] : m8.ub[12] r8.ub[34] => 1.ub[47]
64 addb r8.ub[12] r8.ub[34] => 1.ub[46]
  /frameworks/base/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_polyphase_filter_window_arm.s 80 ldr r8,[r3,#0x700]
83 smull r6,r2,r5,r8
86 smlal r8,r9,r5,r8
98 ldr r8,[r3,#0x600]
101 smull r2,r5,r6,r8
104 smlal r8,r9,r6,r8
116 ldr r8,[r3,#0x500]
119 smull r2,r5,r6,r8
pvmp3_polyphase_filter_window_gcc.s 78 ldr r8,[r3,#0x700]
81 smull r6,r2,r5,r8
84 smlal r8,r9,r5,r8
96 ldr r8,[r3,#0x600]
99 smull r2,r5,r6,r8
102 smlal r8,r9,r6,r8
114 ldr r8,[r3,#0x500]
117 smull r2,r5,r6,r8
pvmp3_polyphase_filter_window_wm.asm 75 ldr r8,[r3,#0x700]
78 smull r6,r2,r5,r8
81 smlal r8,r9,r5,r8
93 ldr r8,[r3,#0x600]
96 smull r2,r5,r6,r8
99 smlal r8,r9,r6,r8
111 ldr r8,[r3,#0x500]
114 smull r2,r5,r6,r8
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-linux.S 71 Int sigsetSzB) // r8
95 pushq %r8
98 popq %r8 ; \
118 movq %r8, %r10 // sigsetSzB
136 movq OFFSET_amd64_R8(%rax), %r8
160 movq %r8, %r10 // sigsetSzB
  /frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
Syn_filt_32_opt.s 52 LDRSH r8, [r0, #6] @ load Aq[3]
55 AND r8, r8, r14
57 ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
63 LDRSH r8, [r0, #14] @ load Aq[7]
66 AND r8, r8, r14
68 ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
74 LDRSH r8, [r0, #22] @ load Aq[11]
77 AND r8, r8, r1
Norm_Corr_opt.s 56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
101 @r7 --- scale r4 --- t_min r8 --- excf[]
109 MOV r8, #0x8000
180 ADD r12, r12, r8
196 MOV r8, r0 @ exc[]
200 ADD r8, r8, r5, LSL #1 @ exc[k] address
203 LDRSH r11, [r8] @ tmp = exc[k]
  /external/openssl/crypto/aes/asm/
aes-armv4.s 203 and r8,lr,r0,lsr#8
209 ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
210 and r8,lr,r1
217 ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
222 eor r5,r5,r8,ror#8
223 and r8,lr,r2,lsr#16 @ i1
230 ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
235 eor r1,r1,r8,ror#
  /external/openssl/crypto/sha/asm/
sha256-armv4.s 31 ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
44 mov r0,r8,ror#6
46 eor r0,r0,r8,ror#11
48 eor r0,r0,r8,ror#25 @ Sigma1(e)
49 and r2,r2,r8
77 eor r2,r8,r9
107 eor r2,r7,r8
111 eor r2,r2,r8 @ Ch(e,f,g)
142 add r3,r3,r8
143 mov r8,r9,ror#
  /external/openssl/crypto/bn/asm/
armv4-mont.s 24 ldr r8,[r0,#14*4] @ &n0
28 ldr r8,[r8] @ *n0
32 str r8,[r0,#14*4] @ save n0 value
33 mul r8,r10,r8 @ "tp[0]"*n0
35 umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
45 umlal r12,r14,r6,r8 @ np[j]*n0
57 ldr r8,[r0,#14*4] @ restore n0
74 mul r8,r10,r
  /frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
Norm_Corr_neon.s 56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
109 @r7 --- scale r4 --- t_min r8 --- excf[]
114 MOV r8, #0x8000
220 ADD r12, r12, r8
235 MOV r8, r0 @ exc[]
239 ADD r8, r8, r5, LSL #1 @ exc[k] address
242 LDRSH r11, [r8] @ tmp = exc[k]
syn_filt_neon.s 49 MOV r8, #0 @ i = 0
58 MOV r8, #0 @ loop times
68 ADD r10, r4, r8, LSL #1 @ y[i], yy[i] address
79 ADD r8, r8, #1
88 CMP r8, #80
  /external/libffi/src/powerpc/
aix.S 35 .set r8,8
102 mr r12,r8 // We only need r12 until the call, so it doesn't have to be saved...
104 mr r8,r1
112 stw r28,-16(r8)
113 stw r29,-12(r8)
114 stw r30, -8(r8)
115 stw r31, -4(r8)
117 stw r9, 8(r8)
124 mr r28,r8 /* our AP. */
151 lwz r8, 20+(6*4)(r1)
  /external/libvpx/vp8/encoder/arm/armv6/
vp8_fast_fdct4x4_armv6.asm 49 ldrd r8, r9, [r0] ; [i5 | i4] [i7 | i6]
59 qadd16 r6, r8, r9 ; [i5+i6 | i4+i7] = [b1 | a1] without shift
60 qsub16 r7, r8, r9 ; [i5-i6 | i4-i7] = [c1 | d1] without shift
69 smusd r8, r6, lr ; o6 = (i5+i6)*8 - (i4+i7)*8
77 pkhbt r6, r8, r7, lsl #4 ; [o7 | o6]
94 smusd r8, r6, lr ; o10 = (i9+i10)*8 - (i8+i11)*8
102 pkhbt r6, r8, r7, lsl #4 ; [o11 | o10]
147 lsl r8, r2, #16 ; prepare bottom halfword for scaling
151 pkhtb r4, r2, r8, asr #20 ; pack and scale bottom halfword
159 smlabb r8, r7, r12, r2 ; [ ------ | d1*5352]
vp8_variance16x16_armv6.asm 28 mov r8, #0 ; initialize sum = 0
49 adds r8, r8, r4 ; add positive differences to sum
50 subs r8, r8, r5 ; substract negative differences from sum
73 add r8, r8, r4 ; add positive differences to sum
74 sub r8, r8, r5 ; substract negative differences from sum
97 add r8, r8, r4 ; add positive differences to sum
vp8_variance_halfpixvar16x16_h_armv6.asm 28 mov r8, #0 ; initialize sum = 0
54 adds r8, r8, r4 ; add positive differences to sum
55 subs r8, r8, r5 ; substract negative differences from sum
85 add r8, r8, r4 ; add positive differences to sum
86 sub r8, r8, r5 ; substract negative differences from sum
116 add r8, r8, r4 ; add positive differences to sum
vp8_variance_halfpixvar16x16_v_armv6.asm 28 mov r8, #0 ; initialize sum = 0
55 adds r8, r8, r4 ; add positive differences to sum
56 subs r8, r8, r5 ; substract negative differences from sum
86 add r8, r8, r4 ; add positive differences to sum
87 sub r8, r8, r5 ; substract negative differences from sum
117 add r8, r8, r4 ; add positive differences to sum
  /bionic/libc/arch-arm/bionic/
memcpy.S 223 ldmmiia r1!, {r8, r9} /* 8 bytes */
225 stmmiia r0!, {r8, r9}
294 ldmmiia r1!, {r8, r9} /* 8 bytes */
296 stmmiia r0!, {r8, r9}
383 ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
395 orr r7, r7, r8, lsl #16
396 mov r8, r8, lsr #16
397 orr r8, r8, r9, lsl #16
  /bionic/libc/kernel/arch-x86/asm/
ptrace.h 55 unsigned long r8; member in struct:pt_regs
  /dalvik/vm/compiler/template/armv5te/
TEMPLATE_STRING_COMPARETO.S 59 * r3, r4, r7, r8, r9, r12 available for loading string data
73 ldrh r8, [r1, #2]!
75 subeqs r0, r7, r8
86 ldrh r8, [r1, #2]!
90 subeqs r0, r7, r8
  /development/ndk/platforms/android-9/arch-x86/include/asm/
ptrace.h 55 unsigned long r8; member in struct:pt_regs
  /prebuilt/linux-x86/toolchain/i686-linux-glibc2.7-4.4.3/sysroot/usr/include/asm/
ptrace.h 46 unsigned long r8; member in struct:pt_regs
