    Searched refs: r9 (Results 1 - 25 of 691)

  /external/chromium_org/third_party/boringssl/linux-arm/crypto/sha/
sha1-armv4-large.S 25 ldrb r9,[r1,#3]
29 orr r9,r9,r10,lsl#8
31 orr r9,r9,r11,lsl#16
33 orr r9,r9,r12,lsl#24
35 ldr r9,[r1],#4 @ handles unaligned
40 rev r9,r9 @ byte swa
    [all...]
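The sha1-armv4-large.S hits show the two ways the SHA-1 block loop fetches a big-endian 32-bit message word: byte-by-byte ldrb/orr assembly when the input pointer may be unaligned, or a single ldr followed by rev (byte swap) on cores that tolerate unaligned word loads. A minimal C sketch of both paths, assuming a GCC/Clang toolchain for __builtin_bswap32 (helper names are illustrative, not OpenSSL's):

    #include <stdint.h>
    #include <string.h>

    /* Byte-by-byte big-endian load, safe for any alignment
     * (mirrors the ldrb/orr path). */
    static uint32_t load_be32_bytes(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) |
               ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |
                (uint32_t)p[3];
    }

    /* Plain word load plus byte swap: the ldr + rev path used on
     * little-endian cores that accept unaligned loads. */
    static uint32_t load_be32_rev(const uint8_t *p)
    {
        uint32_t w;
        memcpy(&w, p, 4);             /* possibly unaligned load */
        return __builtin_bswap32(w);  /* equivalent of the ARM rev insn */
    }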
  /external/openssl/crypto/bn/asm/
ppc.pl 232 #.set r9,9
275 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
279 # r9,r10, r11 are the equivalents of c1,c2, c3.
288 $UMULL r9,r5,r5
294 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
302 addze r9,r0 # catch carry if any.
303 # r9= r0(=0) and carry
307 addze r9,r9
314 adde r9,r8,r
    [all...]
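The ppc.pl comments map r9/r10/r11 onto the c1/c2/c3 column accumulators of a comba-style multiply/square: each partial product is folded into a rolling three-limb accumulator, the lowest limb is stored as one result word, and addze catches the carry. A hedged C sketch of that accumulation step, assuming 32-bit limbs (the function name is mine, not OpenSSL's):

    #include <stdint.h>

    /* Fold one partial product a*b into the rolling accumulator c1/c2/c3. */
    static void comba_accumulate(uint32_t a, uint32_t b,
                                 uint32_t *c1, uint32_t *c2, uint32_t *c3)
    {
        uint64_t prod = (uint64_t)a * b;
        uint64_t t = (uint64_t)*c1 + (uint32_t)prod;             /* low half */
        *c1 = (uint32_t)t;
        t = (uint64_t)*c2 + (uint32_t)(prod >> 32) + (t >> 32);  /* high half + carry */
        *c2 = (uint32_t)t;
        *c3 += (uint32_t)(t >> 32);                              /* catch any carry */
    }

Once every partial product of a column has been accumulated, the low limb is emitted (the "r[0]=c1" store in the hit) and the accumulators rotate for the next column; the addze instructions are part of that carry bookkeeping.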
  /external/linux-tools-perf/perf-3.12.0/arch/avr32/lib/
memcpy.S 22 mov r9, r11
23 andl r9, 3, COH
27 2: mov r9, r12
37 reteq r9
48 retal r9
52 movlt r9, r12
54 add r10, r9
55 lsl r9, 2
56 add pc, pc, r9
64 add pc, pc, r9
    [all...]
memset.S 27 mov r9, r12
30 andl r9, 3, COH
64 add r10, r9
65 lsl r9, 1
66 add pc, r9
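Both AVR32 routines start by testing the low address bits (andl r9, 3) and then dispatch with computed jumps (add pc, pc, r9) into unrolled byte and word cases. The same structure in portable C, with the jump tables replaced by plain loops (a sketch, not the kernel's implementation):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        uint8_t *d = dst;
        const uint8_t *s = src;

        while (((uintptr_t)d & 3) && n) {   /* head: copy bytes until aligned */
            *d++ = *s++;
            n--;
        }
        while (n >= 4) {                    /* body: whole-word copies */
            uint32_t w;
            memcpy(&w, s, 4);
            memcpy(d, &w, 4);
            d += 4; s += 4; n -= 4;
        }
        while (n--)                         /* tail: leftover bytes */
            *d++ = *s++;
        return dst;
    }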
  /external/llvm/test/MC/ARM/
not-armv4.s 5 clz r4,r9
8 rbit r4,r9
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/arm/armv6/
walsh_v6.asm 35 ldrd r8, r9, [r0], r2
42 qadd16 r7, r8, r9 ; [d1|a1] [9+11 | 8+10]
43 qsub16 r8, r8, r9 ; [c1|b1] [9-11 | 8-10]
46 qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14]
65 lsls r2, r9, #16
66 smuad r2, r9, lr ; D0 = a1<<2 + d1<<2
106 smusd r9, r9, lr ; D3 = a1<<2 - d1<<2
107 add r7, r5, r9 ; d1_3 = B3 + D3
108 sub r5, r5, r9 ; c1_3 = B3 - D
    [all...]
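walsh_v6.asm keeps two 16-bit coefficients packed in each register, so one qadd16/qsub16 performs the butterfly for both halves at once (the "[d1|a1]" comments), and smuad/smusd form the dual multiply-accumulate sums. A plain-C model of the packed add; real QADD16 additionally saturates each lane to the signed 16-bit range, which is omitted here:

    #include <stdint.h>

    /* Lane-wise add of two signed 16-bit values packed in 32-bit words. */
    static uint32_t qadd16_sketch(uint32_t x, uint32_t y)
    {
        uint16_t lo = (uint16_t)((x & 0xffffu) + (y & 0xffffu));
        uint16_t hi = (uint16_t)((x >> 16) + (y >> 16));
        return ((uint32_t)hi << 16) | lo;
    }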
  /external/libvpx/libvpx/vp8/encoder/arm/armv6/
walsh_v6.asm 35 ldrd r8, r9, [r0], r2
42 qadd16 r7, r8, r9 ; [d1|a1] [9+11 | 8+10]
43 qsub16 r8, r8, r9 ; [c1|b1] [9-11 | 8-10]
46 qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14]
65 lsls r2, r9, #16
66 smuad r2, r9, lr ; D0 = a1<<2 + d1<<2
106 smusd r9, r9, lr ; D3 = a1<<2 - d1<<2
107 add r7, r5, r9 ; d1_3 = B3 + D3
108 sub r5, r5, r9 ; c1_3 = B3 - D
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/
walsh_v6.asm 35 ldrd r8, r9, [r0], r2
42 qadd16 r7, r8, r9 ; [d1|a1] [9+11 | 8+10]
43 qsub16 r8, r8, r9 ; [c1|b1] [9-11 | 8-10]
46 qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14]
65 lsls r2, r9, #16
66 smuad r2, r9, lr ; D0 = a1<<2 + d1<<2
106 smusd r9, r9, lr ; D3 = a1<<2 - d1<<2
107 add r7, r5, r9 ; d1_3 = B3 + D3
108 sub r5, r5, r9 ; c1_3 = B3 - D
    [all...]
  /external/linux-tools-perf/perf-3.12.0/arch/powerpc/lib/
memcpy_64.S 43 ld r9,0(r4)
50 mr r8,r9
52 1: ld r9,8(r4)
55 stdu r9,16(r3)
62 lwz r9,8(r4)
64 stw r9,0(r3)
67 lhz r9,8(r4)
69 sth r9,0(r3)
72 lbz r9,8(r4)
73 stb r9,0(r3
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
omxVCM4P10_TransformDequantChromaDCFromPair_s.S 32 ldr r9, [r0,#0]
36 ldrb r6, [r9], #1
39 ldrnesb r5, [r9, #1]
40 ldrneb r4, [r9], #2
42 ldreqsb r4, [r9], #1
45 ldreqb r6, [r9], #1
49 str r9, [r0, #0]
56 ldrsb r9, [r5, r2]
60 lsl r2, r2, r9
armVCM4P10_InterpolateLuma_Align_unsafe_s.S 42 SUBS r9,r9,#1
49 SUBS r9,r9,#1
61 SUBS r9,r9,#1
73 SUBS r9,r9,#1
101 SUBS r9,r9,#
    [all...]
armVCM4P10_InterpolateLuma_Copy_unsafe_s.S 46 LDR r9,[r0],r1
48 STR r9,[r2],r3
53 LDR r9,[r0,#4]
59 ORR r8,r8,r9,LSL #24
63 LDR r9,[r0,#4]
69 ORR r8,r8,r9,LSL #24
75 LDR r9,[r0,#4]
81 ORR r8,r8,r9,LSL #16
85 LDR r9,[r0,#4]
91 ORR r8,r8,r9,LSL #1
    [all...]
omxVCM4P10_DeblockLuma_I.S 31 PUSH {r4-r9,lr}
34 MOV r9,r1
41 TSTEQ r9,#7
57 POP {r4-r9,pc}
63 MOV r1,r9
74 MOV r1,r9
78 POP {r4-r9,pc}
  /external/valgrind/main/coregrind/m_syswrap/
syscall-s390x-linux.S 96 we clobber (r6-r9) */
97 stmg %r2,%r9, SP_R2(%r15)
108 lg %r9, SP_R3(%r15) /* guest state --> r9 */
109 lg %r2, OFFSET_s390x_r2(%r9) /* guest r2 --> real r2 */
110 lg %r3, OFFSET_s390x_r3(%r9) /* guest r3 --> real r3 */
111 lg %r4, OFFSET_s390x_r4(%r9) /* guest r4 --> real r4 */
112 lg %r5, OFFSET_s390x_r5(%r9) /* guest r5 --> real r5 */
113 lg %r6, OFFSET_s390x_r6(%r9) /* guest r6 --> real r6 */
114 lg %r7, OFFSET_s390x_r7(%r9) /* guest r7 --> real r7 *
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
convolve_opt.s 42 LDRSH r9, [r6], #2 @ *tmpX++
45 MUL r8, r9, r10
50 LDRSH r9, [r6], #2 @ *tmpX++
54 MLA r8, r9, r10, r8
56 LDRSH r9, [r6], #2 @ *tmpX++
60 MLA r8, r9, r10, r8
77 LDRSH r9, [r6], #2 @ *tmpX++
82 MUL r8, r9, r10
89 LDRSH r9, [r6], #2 @ *tmpX++
93 MLA r8, r9, r10, r
    [all...]
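The convolve_opt.s loop is a plain multiply-accumulate over 16-bit samples: LDRSH fetches a sample and a filter tap, MUL starts the sum, and each following MLA folds another product into r8. Equivalent scalar C for one output sample (names and tap ordering are illustrative, not the codec's API):

    #include <stdint.h>

    static int32_t convolve_sample(const int16_t *x, const int16_t *h, int n)
    {
        int32_t acc = 0;                          /* r8 in the listing */
        for (int i = 0; i < n; i++)
            acc += (int32_t)x[i] * h[n - 1 - i];  /* MLA: acc += sample * tap */
        return acc;
    }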
cor_h_vec_opt.s 49 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
55 LDRSH r8, [r9], #2
56 LDRSH r14, [r9]
68 ADD r9, r5, r14
69 MOV r5, r9, ASR #16
71 ADD r9, r3, r2, LSL #1 @address of sign[pos]
73 LDRSH r10, [r9], #2 @sign[pos]
74 LDRSH r11, [r9] @sign[pos + 1]
79 LDR r9, [r13, #44]
83 ADD r9, r9, r4, LSL #
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
cor_h_vec_neon.s 50 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
56 LDRSH r8, [r9], #2
57 LDRSH r14, [r9]
69 ADD r9, r5, r14
70 MOV r5, r9, ASR #16
72 ADD r9, r3, r2, LSL #1 @address of sign[pos]
74 LDRSH r10, [r9], #2 @sign[pos]
75 LDRSH r11, [r9] @sign[pos + 1]
80 LDR r9, [r13, #44]
84 ADD r9, r9, r4, LSL #
    [all...]
  /system/core/libpixelflinger/
col32cb16blend.S 43 mov r9, #0xff // create mask
47 and r12, r9, r1, lsr #8 // extract green
48 and r4, r9, r1, lsr #16 // extract blue
52 mov r9, r9, lsr #2 // create dest green mask
58 and r7, r9, r8, lsr #5 // extract dest green
rotate90CW_4x4_16v6.S 36 stmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, lr}
43 ldrd r8, r9, [r1]
53 pkhbt r10, r9, r7, lsl #16
58 pkhtb r10, r7, r9, asr #16
62 ldmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, pc}
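The col32cb16blend.S lines build byte masks in r9 and extract the source channels by shift-and-mask (green at bit 8, blue at bit 16 per its comments), then pull the 5/6/5-bit destination channels out of the RGB565 pixel ((dst >> 5) & 0x3f for green) before blending. A sketch of the channel packing only, assuming red sits in the low byte as those comments imply (the blend arithmetic is omitted):

    #include <stdint.h>

    /* Reduce an 8-bit-per-channel colour to RGB565. */
    static uint16_t pack_rgb565(uint32_t src32)
    {
        uint32_t r = (src32 & 0xff)         >> 3;   /* 8 -> 5 bits */
        uint32_t g = ((src32 >> 8)  & 0xff) >> 2;   /* 8 -> 6 bits */
        uint32_t b = ((src32 >> 16) & 0xff) >> 3;   /* 8 -> 5 bits */
        return (uint16_t)((r << 11) | (g << 5) | b);
    }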
  /external/openssl/crypto/sha/asm/
sha1-armv4-large.S 33 ldrb r9,[r1,#3]
37 orr r9,r9,r10,lsl#8
39 orr r9,r9,r11,lsl#16
41 orr r9,r9,r12,lsl#24
43 ldr r9,[r1],#4 @ handles unaligned
48 rev r9,r9 @ byte swa
    [all...]
  /external/tremolo/Tremolo/
mdctLARM.s 187 LDMFD r12,{r8,r9,r10} @ r8 = step
188 @ r9 = wL
198 LDRB r11,[r9],#1 @ r11= *wL++
227 LDMFD r12,{r8,r9,r10} @ r8 = step
228 @ r9 = wL
237 LDRB r11,[r9],#1 @ r11= *wL++
327 MUL r9, r6, r10 @ r9 = s0*T[0]
329 MLA r9, r7, r11,r9 @ r9 += s2*T[1
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 65 smulwt r9, r3, r6
69 pkhbt r7, r7, r9, lsl #16
74 smulwb r9, r3, r12
77 pkhbt r9, r9, r11, lsl #16
80 uadd16 r7, r12, r9
85 uadd16 r9, r10, r6
93 str r9, [r1], #4
102 ldr r9, [r0], #4
112 pkhbt lr, r9, r7, lsl #1
    [all...]
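dequant_idct_v6.asm leans on smulwb/smulwt, which multiply a 32-bit value by the bottom or top signed halfword of another register and keep the upper 32 bits of the 48-bit product, i.e. a Q16 fixed-point multiply by the packed IDCT constants. C equivalents (helper names are mine):

    #include <stdint.h>

    static int32_t smulwb_sketch(int32_t a, uint32_t b)   /* bottom halfword */
    {
        return (int32_t)(((int64_t)a * (int16_t)(b & 0xffff)) >> 16);
    }

    static int32_t smulwt_sketch(int32_t a, uint32_t b)   /* top halfword */
    {
        return (int32_t)(((int64_t)a * (int16_t)(b >> 16)) >> 16);
    }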
  /external/libvpx/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 65 smulwt r9, r3, r6
69 pkhbt r7, r7, r9, lsl #16
74 smulwb r9, r3, r12
77 pkhbt r9, r9, r11, lsl #16
80 uadd16 r7, r12, r9
85 uadd16 r9, r10, r6
93 str r9, [r1], #4
102 ldr r9, [r0], #4
112 pkhbt lr, r9, r7, lsl #1
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/
dequant_idct_v6.asm 65 smulwt r9, r3, r6
69 pkhbt r7, r7, r9, lsl #16
74 smulwb r9, r3, r12
77 pkhbt r9, r9, r11, lsl #16
80 uadd16 r7, r12, r9
85 uadd16 r9, r10, r6
93 str r9, [r1], #4
102 ldr r9, [r0], #4
112 pkhbt lr, r9, r7, lsl #1
    [all...]
  /bionic/libc/arch-x86_64/string/
sse2-memmove-slm.S 215 lea (%r8, %rsi), %r9
234 movzbl -1(%r9,%rdx), %esi
235 movzbl (%r9), %ebx
241 movdqu (%r9), %xmm0
242 movdqu 16(%r9), %xmm1
243 movdqu -32(%r9, %rdx), %xmm2
244 movdqu -16(%r9, %rdx), %xmm3
252 movdqu (%r9), %xmm0
253 movdqu -16(%r9, %rdx), %xmm1
259 movl (%r9), %es
    [all...]
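For short moves, sse2-memmove-slm.S loads the head and tail of the source region into registers (the movzbl/movdqu pairs) before issuing any store, so the copy stays correct even when source and destination overlap. The same idea for the <= 32-byte case in C (a sketch under that size assumption, not bionic's code):

    #include <stddef.h>
    #include <string.h>

    static void memmove_small32(void *dst, const void *src, size_t n)
    {
        unsigned char head[16], tail[16];

        if (n == 0)
            return;
        if (n <= 16) {
            memcpy(head, src, n);                      /* read everything first */
            memcpy(dst, head, n);
            return;
        }
        memcpy(head, src, 16);                         /* first 16 bytes */
        memcpy(tail, (const char *)src + n - 16, 16);  /* last 16 bytes  */
        memcpy(dst, head, 16);                         /* only now store */
        memcpy((char *)dst + n - 16, tail, 16);
    }

Because every load happens before the first store, overlapping head and tail chunks cannot clobber bytes that still need to be read.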
