Searched refs: r11 (Results 1 - 25 of 324)


  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S 21 LDR r11,[r0,#0xc]
25 UQSUB16 r11,r11,r6
29 USAT16 r11,#13,r11
33 AND r11,r12,r11,LSR #5
37 ORR r11,r10,r11,LSL #8
40 STRD r10,r11,[r7],#
    [all...]
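The UQSUB16/USAT16/LSR #5 sequence above is the clipping stage of the H.264 half-pel luma filter: subtract with per-lane unsigned saturation, clamp each 16-bit lane to 13 bits, then narrow to 8-bit pixels. A rough C model of the two SIMD instructions (lane semantics only; the filter's actual constants live elsewhere in the file):

    #include <stdint.h>

    /* UQSUB16: unsigned saturating subtract on both 16-bit lanes */
    static uint32_t uqsub16(uint32_t a, uint32_t b)
    {
        uint32_t al = a & 0xFFFFu, bl = b & 0xFFFFu;
        uint32_t ah = a >> 16,     bh = b >> 16;
        uint32_t lo = (al < bl) ? 0 : al - bl;
        uint32_t hi = (ah < bh) ? 0 : ah - bh;
        return (hi << 16) | lo;
    }

    /* USAT16 #n: clamp each signed 16-bit lane to [0, 2^n - 1] */
    static uint32_t usat16(uint32_t a, int n)
    {
        uint32_t out = 0;
        for (int i = 0; i < 2; i++) {
            int32_t v = (int16_t)(a >> (16 * i));
            int32_t max = (1 << n) - 1;
            if (v < 0) v = 0;
            if (v > max) v = max;
            out |= (uint32_t)v << (16 * i);
        }
        return out;
    }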
armVCM4P10_Average_4x_Align_unsafe_s.S 21 LDR r11,[r0],r1
25 UHSUB8 r4,r11,lr
32 LDR r11,[r0],r1
37 UHSUB8 r4,r11,lr
55 LDR r11,[r0],r1
60 LSR r11,r11,#16
61 ORR r11,r11,r5,LSL #16
63 UHSUB8 r4,r11,lr
    [all...]
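UHSUB8 performs four byte-wise "subtract then halve" operations in one instruction; codec code combines it with a complemented operand to build rounded byte averages without unpacking. A per-lane model, assuming the compiler's right shift of a negative int is arithmetic (true on mainstream compilers, matching the instruction):

    #include <stdint.h>

    /* UHSUB8: per byte, (a - b) >> 1 on the 9-bit signed difference */
    static uint32_t uhsub8(uint32_t a, uint32_t b)
    {
        uint32_t r = 0;
        for (int i = 0; i < 4; i++) {
            int d = (int)((a >> (8 * i)) & 0xFF) - (int)((b >> (8 * i)) & 0xFF);
            r |= (uint32_t)((d >> 1) & 0xFF) << (8 * i);  /* halve, keep low byte */
        }
        return r;
    }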
armVCM4P10_InterpolateLuma_Align_unsafe_s.S 26 LDM r0,{r7,r10,r11}
29 STM r8!,{r7,r10,r11}
33 LDM r0,{r7,r10,r11}
39 ORR r10,r10,r11,LSL #24
40 LSR r11,r11,#8
41 STM r8!,{r7,r10,r11}
45 LDM r0,{r7,r10,r11}
51 ORR r10,r10,r11,LSL #16
52 LSR r11,r11,#16
    [all...]
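This is the classic word-realignment copy: when the source is offset by k bytes (k = 1..3; the aligned case takes the plain LDM/STM path), each output word splices two input words with complementary shifts, e.g. LSL #24 paired with LSR #8 for k = 1. A little-endian C sketch with a hypothetical helper name:

    #include <stdint.h>

    /* Copy nwords 32-bit words from a misaligned little-endian source. */
    static void copy_realigned(uint32_t *dst, const uint8_t *src, int nwords)
    {
        unsigned k = (uintptr_t)src & 3;             /* byte offset, 1..3 here */
        const uint32_t *p = (const uint32_t *)(src - k);
        uint32_t cur = *p++;
        for (int i = 0; i < nwords; i++) {
            uint32_t next = *p++;
            dst[i] = (cur >> (8 * k)) | (next << (32 - 8 * k));
            cur = next;
        }
    }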
omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S 29 ADD r11,r1,r1
35 VLD1.8 {d7},[r0],r11
36 VLD1.8 {d8},[r10],r11
37 VLD1.8 {d5},[r0],r11
39 VLD1.8 {d10},[r10],r11
40 VLD1.8 {d6},[r0],r11
42 VLD1.8 {d9},[r10],r11
43 VLD1.8 {d4},[r0],r11
44 VLD1.8 {d11},[r10],r11
89 VST1.8 {d7},[r0],r11
    [all...]
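Each VLD1.8 {dN},[rX],r11 loads one 8-pixel row and post-increments the pointer by r11 = 2 * step (set up by the ADD at line 29); two pointers staggered by one step cover alternating rows ahead of the in-register transpose. The addressing itself is a plain strided row gather:

    #include <stdint.h>

    /* Gather 8 rows of 8 pixels into a contiguous block. */
    static void gather_rows(uint8_t dst[8][8], const uint8_t *src, int step)
    {
        for (int row = 0; row < 8; row++)
            for (int col = 0; col < 8; col++)
                dst[row][col] = src[row * step + col];
    }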
  /system/core/libpixelflinger/
rotate90CW_4x4_16v6.S 36 stmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, lr}
46 pkhbt r11, r4, r2, lsl #16
47 strd r10, r11, [r0], r12
50 pkhtb r11, r2, r4, asr #16
52 strd r10, r11, [r0], r12
54 pkhbt r11, r5, r3, lsl #16
56 strd r10, r11, [r0], r12
59 pkhtb r11, r3, r5, asr #16
60 strd r10, r11, [r0]
62 ldmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, pc}
    [all...]
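pkhbt and pkhtb splice 16-bit halves from two registers, which is how the rotate builds each output word of the rotated 4x4 block of 16-bit pixels from two different input rows. Lane models of the two pack instructions:

    #include <stdint.h>

    /* PKHBT Rd,Rn,Rm,LSL #s: bottom half of Rn, top half of (Rm << s) */
    static uint32_t pkhbt(uint32_t rn, uint32_t rm, int s)
    {
        return (rn & 0xFFFFu) | ((rm << s) & 0xFFFF0000u);
    }

    /* PKHTB Rd,Rn,Rm,ASR #s: top half of Rn, bottom half of (Rm >> s) */
    static uint32_t pkhtb(uint32_t rn, uint32_t rm, int s)
    {
        return (rn & 0xFFFF0000u) | ((uint32_t)((int32_t)rm >> s) & 0xFFFFu);
    }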
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
pred_lt4_1_opt.s 69 SMULBB r11, r6, r3 @x[1] * h[0]
74 SMLABT r11, r9, r3, r11 @x[2] * h[1]
80 SMLABB r11, r4, r3, r11 @x[3] * h[2]
85 SMLABT r11, r6, r3, r11 @x[4] * h[3]
91 SMLABB r11, r9, r3, r11 @x[5] * h[4]
96 SMLABT r11, r4, r3, r11 @x[6] * h[5]
    [all...]
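SMULBB/SMLABB/SMLABT select 16-bit halves (B = bottom, T = top) and multiply-accumulate in one instruction, so a register holding two packed samples feeds two taps of the x[]*h[] sum in the comments. Half-select models:

    #include <stdint.h>

    /* SMLABB: acc + bottom(rn) * bottom(rm) */
    static int32_t smlabb(int32_t acc, int32_t rn, int32_t rm)
    {
        return acc + (int16_t)rn * (int16_t)rm;
    }

    /* SMLABT: acc + bottom(rn) * top(rm) */
    static int32_t smlabt(int32_t acc, int32_t rn, int32_t rm)
    {
        return acc + (int16_t)rn * (int16_t)(rm >> 16);
    }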
residu_asm_opt.s 56 LDRH r11, [r0], #2
57 ORR r10, r11, r10, LSL #16 @r10 --- a10, a11
59 LDRH r11, [r0], #2
61 ORR r11, r12, r11, LSL #16 @r11 --- a12, a13
82 SMULTB r11, r5, r10 @i3(0) --- r11 = x[2] * a0
86 SMLABT r11, r5, r2, r11 @i3(1) --- r11 += x[1] * a
    [all...]
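residu_asm_opt.s packs two 16-bit LPC coefficients per 32-bit register (the ORR ... LSL #16 lines) and consumes them with half-register multiplies. Functionally the kernel is an FIR over the coefficient array; a scalar sketch, assuming the Q12 coefficient format used by AMR-WB (the reference code's exact rounding and saturation are omitted):

    #include <stdint.h>

    /* y[i] = sum_{k=0..m} a[k] * x[i-k]; x must carry m samples of history */
    static void residu(const int16_t *a, int m, const int16_t *x,
                       int16_t *y, int n)
    {
        for (int i = 0; i < n; i++) {
            int32_t s = 0;
            for (int k = 0; k <= m; k++)
                s += (int32_t)a[k] * x[i - k];
            y[i] = (int16_t)(s >> 12);    /* assumed Q12 -> Q0 */
        }
    }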
Syn_filt_32_opt.s 57 ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
59 STR r11, [r13, #-8]
68 ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
70 STR r11, [r13, #-16]
79 ORR r11, r8, r9, LSL #16 @ Aq[12] -- Aq[11]
81 STR r11, [r13, #-24]
90 ORR r11, r8, r9, LSL #16 @ Aq[16] -- Aq[15]
92 STR r11, [r13, #-32]
100 LDR r11, [r13, #-4] @ Aq[2] -- Aq[1]
104 SMULBB r12, r6, r11 @ sig_lo[i-1] * Aq[1]
    [all...]
Norm_Corr_opt.s 58 RSB r11, r4, #0 @k = -t_min
59 ADD r5, r0, r11, LSL #1 @get the &exc[k]
79 LDR r11, [r14], #4
85 SMLABB r6, r11, r11, r6
86 SMLATT r6, r11, r11, r6
112 LDR r11, [r14], #4 @load excf[i], excf[i+1]
114 SMLABB r6, r11, r11, r6 @L_tmp1 += excf[i] * excf[i]
    [all...]
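The SMLABB/SMLATT pair squares and accumulates both 16-bit samples of each loaded word, i.e. signal energy at two samples per memory access. Scalar equivalent (the real code uses saturating accumulation):

    #include <stdint.h>

    static int32_t energy(const int16_t *x, int n)    /* n even */
    {
        int32_t acc = 0;
        for (int i = 0; i < n; i += 2) {
            acc += x[i] * x[i];            /* SMLABB: bottom halves */
            acc += x[i + 1] * x[i + 1];    /* SMLATT: top halves */
        }
        return acc;
    }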
  /external/openssl/crypto/bn/asm/
ppc.pl 234 #.set r11,11
275 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
279 # r9,r10, r11 are the equivalents of c1,c2, c3.
291 # Note c3(r11) is NOT set to 0
306 addze r11,r8 # r8 added to r11 which is 0
313 addc r11,r7,r11
325 addc r11,r7,r11
    [all...]
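The ppc.pl comments describe a comba (column-by-column) bignum multiply: r9, r10, r11 form the three-word carry chain c1, c2, c3, and c3 deliberately keeps the previous column's spill rather than starting at 0. A C model of one column step, in the style of the portable mul_add_c in OpenSSL's bn code:

    #include <stdint.h>

    /* (c3:c2:c1) += a * b */
    static void mul_add_c(uint32_t a, uint32_t b,
                          uint32_t *c1, uint32_t *c2, uint32_t *c3)
    {
        uint64_t t = (uint64_t)a * b + *c1;
        *c1 = (uint32_t)t;
        t = (t >> 32) + *c2;
        *c2 = (uint32_t)t;
        *c3 += (uint32_t)(t >> 32);
    }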
x86_64-mont.S 26 movq %rsp,%r11
31 movq %r11,8(%rsp,%r9,8)
47 movq %rdx,%r11
63 addq %r11,%r13
64 movq %r10,%r11
71 addq %rax,%r11
84 addq %r11,%r13
88 movq %r10,%r11
91 addq %r11,%r13
110 movq %rdx,%r11
    [all...]
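x86_64-mont.S is OpenSSL's Montgomery multiplication loop; %r11 here is one of the 64-bit word temporaries. A compact C sketch of the word-by-word algorithm (not this file's exact scheduling), assuming GCC/Clang's unsigned __int128:

    #include <stdint.h>
    #include <string.h>

    typedef unsigned __int128 u128;

    /* Computes a*b*R^-1 mod n (up to one final subtraction, omitted),
     * R = 2^(64*num); n0 = -n^-1 mod 2^64. Caller zeroes t[num+2]. */
    static void mont_mul(uint64_t *t, const uint64_t *a, const uint64_t *b,
                         const uint64_t *n, uint64_t n0, int num)
    {
        for (int i = 0; i < num; i++) {
            uint64_t c = 0;
            for (int j = 0; j < num; j++) {        /* t += a * b[i] */
                u128 s = (u128)a[j] * b[i] + t[j] + c;
                t[j] = (uint64_t)s; c = (uint64_t)(s >> 64);
            }
            u128 s = (u128)t[num] + c;
            t[num] = (uint64_t)s; t[num + 1] += (uint64_t)(s >> 64);

            uint64_t m = t[0] * n0; c = 0;
            for (int j = 0; j < num; j++) {        /* t += m*n; t[0] -> 0 */
                u128 s2 = (u128)m * n[j] + t[j] + c;
                t[j] = (uint64_t)s2; c = (uint64_t)(s2 >> 64);
            }
            s = (u128)t[num] + c;
            t[num] = (uint64_t)s; t[num + 1] += (uint64_t)(s >> 64);

            memmove(t, t + 1, (num + 1) * sizeof(uint64_t));  /* /= 2^64 */
            t[num + 1] = 0;
        }
    }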
  /external/llvm/test/MC/X86/
x86_64-bmi-encoding.s 7 // CHECK: blsmskq %r11, %r10
9 blsmskq %r11, %r10
23 // CHECK: blsiq %r11, %r10
25 blsiq %r11, %r10
39 // CHECK: blsrq %r11, %r10
41 blsrq %r11, %r10
55 // CHECK: andnq (%rax), %r11, %r10
57 andnq (%rax), %r11, %r10
71 // CHECK: bextrq %r12, %r11, %r10
73 bextrq %r12, %r11, %r10
    [all...]
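The CHECK lines verify instruction encodings; the BMI1 operations themselves are one-line bit tricks (operand order per the AT&T syntax above, so andn computes ~src1 & src2):

    #include <stdint.h>

    static uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); }  /* mask through lowest set bit */
    static uint64_t blsi(uint64_t x)   { return x & (0 - x); }  /* isolate lowest set bit */
    static uint64_t blsr(uint64_t x)   { return x & (x - 1); }  /* clear lowest set bit */
    static uint64_t andn(uint64_t a, uint64_t b) { return ~a & b; }

    /* bextr: extract len bits starting at bit start (len < 64 assumed) */
    static uint64_t bextr(uint64_t src, unsigned start, unsigned len)
    {
        return (src >> start) & ((1ULL << len) - 1);
    }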
  /external/compiler-rt/lib/arm/
aeabi_ldivmod.S 22 push {r11, lr}
30 pop {r11, pc}
aeabi_uldivmod.S 22 push {r11, lr}
30 pop {r11, pc}
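__aeabi_ldivmod is the ARM EABI helper for signed 64-bit division: the quotient comes back in r0:r1 and the remainder in r2:r3, so one call serves both / and %. The push/pop of {r11, lr} is just frame setup around the core routine. From C, the same contract looks like lldiv:

    #include <stdlib.h>

    long long quot_plus_rem(long long a, long long b)
    {
        lldiv_t qr = lldiv(a, b);  /* on ARM EABI, a/b and a%b lower to __aeabi_ldivmod */
        return qr.quot + qr.rem;
    }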
  /external/valgrind/main/coregrind/m_syswrap/
syscall-amd64-darwin.S 117 movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
118 movq OFFSET_amd64_RDI(%r11), %rdi
119 movq OFFSET_amd64_RSI(%r11), %rsi
120 movq OFFSET_amd64_RDX(%r11), %rdx
121 movq OFFSET_amd64_RCX(%r11), %r10 /* rcx is passed in r10 instead */
122 movq OFFSET_amd64_R8(%r11), %r8
123 movq OFFSET_amd64_R9(%r11), %r9
125 movq OFFSET_amd64_RSP(%r11), %r11 /* r11 = simulated RSP */
    [all...]
  /hardware/samsung_slsi/exynos5/libswconverter/
csc_interleave_memcpy_neon.s 61 @r11 src1_addr
68 mov r11, r1
76 vld1.8 {q0}, [r11]!
77 vld1.8 {q2}, [r11]!
78 vld1.8 {q4}, [r11]!
79 vld1.8 {q6}, [r11]!
80 vld1.8 {q8}, [r11]!
81 vld1.8 {q10}, [r11]!
82 vld1.8 {q12}, [r11]!
83 vld1.8 {q14}, [r11]!
    [all...]
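Per the file name, this kernel interleaves two source planes byte-by-byte (the color-space converters use this to build interleaved chroma); the q-register loads above pull 128 bytes of src1 per iteration. A scalar model, assuming interleaving is indeed the output pattern:

    #include <stddef.h>
    #include <stdint.h>

    static void interleave_copy(uint8_t *dst, const uint8_t *src1,
                                const uint8_t *src2, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            dst[2 * i]     = src1[i];
            dst[2 * i + 1] = src2[i];
        }
    }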
  /external/tremolo/Tremolo/
mdctLARM.s 186 STMFD r13!,{r4,r6-r11,r14}
198 LDRB r11,[r9],#1 @ r11= *wL++
202 MUL r11,r12,r11 @ r11 = *l * *wL++
204 MLA r6, r7, r6, r11 @ r6 = *--r * *--wR
215 LDMFD r13!,{r4,r6-r11,PC}
226 STMFD r13!,{r4,r6-r11,r14}
237 LDRB r11,[r9],#1 @ r11= *wL++
    [all...]
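The MUL/MLA pair computes one overlap-add sample of the inverse-MDCT windowing: a forward-walking sample times *wL++ plus a backward-walking sample times *--wR. A scalar model (the fixed-point scaling and clipping of the real code are omitted):

    #include <stdint.h>

    static void window_overlap(int32_t *out, const int32_t *l,
                               const int32_t *r_end, const uint8_t *wL,
                               const uint8_t *wR_end, int n)
    {
        for (int i = 0; i < n; i++)   /* r and wR walk backwards, as in the asm */
            out[i] = l[i] * wL[i] + r_end[-1 - i] * wR_end[-1 - i];
    }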
  /external/openssl/crypto/sha/asm/
sha1-armv4-large.S 24 ldrb r11,[r1,#1]
29 orr r9,r9,r11,lsl#16
49 ldrb r11,[r1,#1]
54 orr r9,r9,r11,lsl#16
74 ldrb r11,[r1,#1]
79 orr r9,r9,r11,lsl#16
99 ldrb r11,[r1,#1]
104 orr r9,r9,r11,lsl#16
124 ldrb r11,[r1,#1]
129 orr r9,r9,r11,lsl#16
    [all...]
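The ldrb/orr lsl chains assemble big-endian 32-bit words from the message bytes, which works for any source alignment and host endianness (SHA-1 is defined on big-endian words). Equivalent C:

    #include <stdint.h>

    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
             | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }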
  /external/libvpx/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copyframe_func_neon.asm 25 push {r4 - r11, lr}
34 ldr r11, [r1, #yv12_buffer_config_v_buffer] ;srcptr1
46 str r11, [sp, #12]
55 add r11, r3, r7
72 vst1.8 {q8, q9}, [r11]!
74 vst1.8 {q10, q11}, [r11]!
76 vst1.8 {q12, q13}, [r11]!
78 vst1.8 {q14, q15}, [r11]!
90 sub r11, r5, r10
115 add r11, r3, r
    [all...]
  /external/openssl/crypto/modes/asm/
ghash-armv4.S 28 stmdb sp!,{r3-r11,lr} @ save r3/end too
31 ldmia r12,{r4-r11} @ copy rem_4bit ...
32 stmdb sp!,{r4-r11} @ ... to stack
44 add r11,r1,r14
48 ldmia r11,{r8-r11} @ load Htbl[nhi]
58 eor r7,r11,r7,lsr#4
65 add r11,r1,r12,lsl#4
69 ldmia r11,{r8-r11} @ load Htbl[nlo]
    [all...]
  /frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/
Radix4FFT_v5.s 27 stmdb sp!, {r4 - r11, lr}
47 mov r11, r2 @ j = bgn
48 cmp r11, #0
53 str r11, [sp, #20]
62 smulwt r3, r11, r8 @ L_mpy_wx(cosx, t1)
64 smlawb r2, r11, r8, r4 @ r2 = L_mpy_wx(cosx, t0) + L_mpy_wx(sinx, t1)@
68 mov r11, r1, asr #2 @ t1 = r1 >> 2@
74 sub r1, r11, r3 @ r1 = t1 - r3@
77 add r3, r11, r3 @ r3 = t1 + r3@
86 smulwt r5, r11, r8 @ L_mpy_wx(cosx, t1)
    [all...]
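smulwt/smlawb implement the L_mpy_wx in the comments: a 32x16 multiply keeping the top 32 bits of the 48-bit product, used here for the FFT twiddle factors. Lane models:

    #include <stdint.h>

    /* SMULWT: (x * top16(w)) >> 16, on the 48-bit intermediate */
    static int32_t smulwt(int32_t x, int32_t w)
    {
        return (int32_t)(((int64_t)x * (int16_t)(w >> 16)) >> 16);
    }

    /* SMLAWB: acc + (x * bottom16(w)) >> 16 */
    static int32_t smlawb(int32_t x, int32_t w, int32_t acc)
    {
        return acc + (int32_t)(((int64_t)x * (int16_t)w) >> 16);
    }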
band_nrg_v5.s 29 stmdb sp!, {r4 - r11, lr}
42 add r11, r1, r2
43 ldrsh r2, [r11, #2]
49 ldr r11, [r0, +r10, lsl #2]
52 smull r11, r7, r11, r11
55 ldr r11, [r0, +r10, lsl #2]
58 smull r11, r7, r11, r11
    [all...]
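SMULL produces the full 64-bit square of each 32-bit sample; band energy is the running sum of those squares over the band's bins. Scalar equivalent (the real code also applies scaling):

    #include <stdint.h>

    static int64_t band_energy(const int32_t *x, int n)
    {
        int64_t acc = 0;
        for (int i = 0; i < n; i++)
            acc += (int64_t)x[i] * x[i];
        return acc;
    }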
  /external/llvm/test/MC/ARM/
dot-req.s 5 mov r11, fred
10 @ CHECK: mov r11, r5 @ encoding: [0x05,0xb0,0xa0,0xe1]
  /external/libvpx/libvpx/vp8/encoder/arm/armv6/
vp8_subtract_armv6.asm 81 stmfd sp!, {r4-r11}
96 uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
99 usub16 r7, r10, r11 ; [d3 | d1] (A)
102 ldr r11, [r5, #4] ; upred (B)
111 uxtb16 r9, r11 ; [p2 | p0] (B)
113 uxtb16 r11, r11, ror #8 ; [p3 | p1] (B)
116 usub16 r7, r10, r11 ; [d3 | d1] (B)
141 uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
144 usub16 r7, r10, r11 ; [d3 | d1] (A)
    [all...]
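uxtb16 zero-extends bytes 0 and 2 of a word into two 16-bit lanes (with ror #8 it selects bytes 1 and 3), and usub16 subtracts lanes in parallel, so each usub16 yields two source-minus-prediction differences. Lane models plus the per-pixel meaning:

    #include <stdint.h>

    static uint32_t uxtb16(uint32_t x)      { return x & 0x00FF00FFu; }
    static uint32_t uxtb16_ror8(uint32_t x) { return (x >> 8) & 0x00FF00FFu; }

    /* what the packed pipeline computes, one pixel at a time */
    static void subtract_row(int16_t *diff, const uint8_t *src,
                             const uint8_t *pred, int n)
    {
        for (int i = 0; i < n; i++)
            diff[i] = (int16_t)(src[i] - pred[i]);
    }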
  /external/libvpx/libvpx/vp8/common/arm/armv6/
loopfilter_v6.asm 64 stmdb sp!, {r4 - r11, lr}
75 ldr r11, [src], pstep ; p1
92 uqsub8 r8, r10, r11 ; p2 - p1
93 uqsub8 r10, r11, r10 ; p1 - p2
99 uqsub8 r6, r11, r12 ; p1 - p0
101 uqsub8 r7, r12, r11 ; p0 - p1
109 uqsub8 r6, r11, r10 ; p1 - q1
110 uqsub8 r7, r10, r11 ; q1 - p1
111 uqsub8 r11, r12, r9 ; p0 - q0
115 orr r12, r11, r12 ; abs (p0-q0)
    [all...]
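The paired uqsub8 lines followed by orr compute a per-byte absolute difference: unsigned saturating a-b is zero whenever a <= b, so OR-ing the two directions leaves |a-b| in each lane, ready for the loop filter's threshold tests. Per-byte model:

    #include <stdint.h>

    static uint8_t absdiff_u8(uint8_t a, uint8_t b)
    {
        uint8_t d1 = a > b ? a - b : 0;   /* uqsub8 a, b */
        uint8_t d2 = b > a ? b - a : 0;   /* uqsub8 b, a */
        return d1 | d2;                   /* orr: one side is zero */
    }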

