    Searched refs: r6 (Results 26 - 50 of 406)


  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
Norm_Corr_opt.s 32 @ r6 --- corr_norm[]
73 MOV r6, #0 @L_tmp = 0
81 SMLABB r6, r9, r9, r6 @L_tmp += (xn[i] * xn[i])
82 SMLATT r6, r9, r9, r6 @L_tmp += (xn[i+1] * xn[i+1])
83 SMLABB r6, r10, r10, r6
84 SMLATT r6, r10, r10, r6
    [all...]
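
The SMLABB/SMLATT pairs above accumulate the energy of the xn[] buffer: each 32-bit load covers two 16-bit samples, whose bottom and top halves are squared and added with a single multiply-accumulate each. A minimal C sketch of the same computation (function name hypothetical):

    #include <stdint.h>

    /* L_tmp += xn[i] * xn[i], two samples per 32-bit load in the asm:
       SMLABB squares the bottom halfword, SMLATT the top one. */
    int32_t sample_energy(const int16_t *xn, int len)
    {
        int32_t L_tmp = 0;                    /* MOV r6, #0 */
        for (int i = 0; i < len; i++)
            L_tmp += (int32_t)xn[i] * xn[i];  /* SMLABB / SMLATT */
        return L_tmp;
    }
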
convolve_opt.s 41 MOV r6, r0 @ tmpX = x
42 LDRSH r9, [r6], #2 @ *tmpX++
50 LDRSH r9, [r6], #2 @ *tmpX++
52 LDRSH r12, [r6], #2 @ *tmpX++
56 LDRSH r9, [r6], #2 @ *tmpX++
58 LDRSH r12, [r6], #2 @ *tmpX++
76 MOV r6, r0
77 LDRSH r9, [r6], #2 @ *tmpX++
79 LDRSH r12, [r6], #2
89 LDRSH r9, [r6], #2 @ *tmpX++
    [all...]
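
The repeated "LDRSH r9, [r6], #2 @ *tmpX++" is a post-incremented 16-bit load walking the input of a convolution. A hedged C sketch of one output sample, assuming the usual convolution indexing (names and loop bounds are assumptions):

    #include <stdint.h>

    /* One convolution output; tmpX mirrors the post-incremented r6. */
    int32_t convolve_one(const int16_t *x, const int16_t *h, int n)
    {
        const int16_t *tmpX = x;                 /* MOV r6, r0  (tmpX = x) */
        int32_t s = 0;
        for (int i = 0; i <= n; i++)
            s += (int32_t)(*tmpX++) * h[n - i];  /* LDRSH r9, [r6], #2 */
        return s;
    }
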
cor_h_vec_opt.s 31 @r6 ---- cor_2[]
48 MOV r6, #0 @L_sum2 = 0
59 MLA r6, r12, r14, r6
63 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
67 ADD r10, r6, r14
70 MOV r6, r10, ASR #16
76 MUL r14, r6, r11
78 MOV r6, r14, ASR #1
    [all...]
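
The tail of the loop above rescales the correlation: the accumulator is shifted left by two, rounded, narrowed to 16 bits, multiplied by a gain, and halved. A C sketch of that sequence (parameter names hypothetical):

    #include <stdint.h>

    /* Scaling tail of the cor_h_vec loop, per the comments above. */
    int16_t scale_sum(int32_t L_sum2, int32_t round, int16_t gain)
    {
        L_sum2 <<= 2;                                /* MOV r6, r6, LSL #2   */
        int32_t t = L_sum2 + round;                  /* ADD r10, r6, r14     */
        int16_t s = (int16_t)(t >> 16);              /* MOV r6, r10, ASR #16 */
        return (int16_t)(((int32_t)s * gain) >> 1);  /* MUL; ASR #1          */
    }
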
Syn_filt_32_opt.s 34 @ lg --- r6
45 LDRSH r6, [r0] @ load Aq[0]
47 MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
50 LDRSH r6, [r0, #2] @ load Aq[1]
54 AND r6, r6, r14
56 ORR r10, r6, r7, LSL #16 @ Aq[2] -- Aq[1]
61 LDRSH r6, [r0, #10] @ load Aq[5]
65 AND r6, r6, r1
    [all...]
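
The AND / ORR ... LSL #16 sequence packs two adjacent 16-bit coefficients (Aq[1] and Aq[2]) into one 32-bit register, ready for dual-halfword MACs. In C:

    #include <stdint.h>

    /* Pack two 16-bit coefficients into one word ("Aq[2] -- Aq[1]"):
       low halfword masked in, high halfword OR-ed in after a 16-bit shift. */
    uint32_t pack_pair(int16_t lo, int16_t hi)
    {
        return (uint32_t)(uint16_t)lo | ((uint32_t)(uint16_t)hi << 16);
    }
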
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
omxVCM4P10_TransformDequantChromaDCFromPair_s.S 21 ldrb r6, [r9], #1
23 tst r6, #0x10
26 and r7, r8, r6, lsl #1
29 tst r6, #0x20
30 ldreqb r6, [r9], #1
37 ldr r6, .LarmVCM4P10_VMatrixQPModTable
38 P1: add r6, pc
42 ldrsb r2, [r6, r2]
44 ssub16 r6, r3, r4
46 vmov d0, r5, r6
    [all...]
armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S 16 PUSH {r4-r6,lr}
18 LDR r6, =0xfe00fe0
25 UQSUB16 r11,r11,r6
26 UQSUB16 r10,r10,r6
27 UQSUB16 r5,r5,r6
28 UQSUB16 r4,r4,r6
44 POP {r4-r6,pc}
50 PUSH {r4-r6,lr}
51 LDR r6, =0xfe00fe0
59 UQSUB16 r11,r11,r6
    [all...]
  /system/core/libpixelflinger/
t32cb16blend.S 40 * clobbered: r6, r7, lr
58 mov r6, \SRC, lsr #3
59 and r6, r6, #0x1F
60 add lr, r6, lr, lsr #8
66 and r6, \DREG, #(0x3F<<(16 + 5))
67 smulbt r6, r7, r6
70 add r6, lr, r6, lsr #(5+8)
    [all...]
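
The lsr #3 / and #0x1F lines extract a 5-bit RGB565 component from an 8-bit source channel, and smulbt weighs the destination field by the alpha held in the top halfword of r7. A scalar sketch of one such blend step; the 0..256 alpha range and the helper name are assumptions:

    #include <stdint.h>

    /* Blend the red fields of an 8-bit source channel and an RGB565
       destination pixel; mirrors the shift/mask/multiply pattern above. */
    uint16_t blend_red565(uint32_t src_r8, uint16_t dst, uint32_t alpha)
    {
        uint32_t s = (src_r8 >> 3) & 0x1F;       /* mov r6, SRC, lsr #3; and */
        uint32_t d = (dst >> 11) & 0x1F;         /* red field of RGB565      */
        uint32_t r = (s * alpha + d * (256 - alpha)) >> 8;  /* smulbt + add  */
        return (uint16_t)((dst & ~(0x1Fu << 11)) | (r << 11));
    }
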
  /external/libvpx/libvpx/vp8/common/arm/armv6/
vp8_variance_halfpixvar16x16_h_armv6.asm 40 ldr r6, [r0, #1] ; load 4 src pixels with 1 byte offset
44 mvn r6, r6
45 uhsub8 r4, r4, r6
48 usub8 r6, r4, r5 ; calculate difference
50 sel r7, r6, lr ; select bytes with positive difference
51 usub8 r6, r5, r4 ; calculate difference with reversed operands
53 sel r6, r6, lr ; select bytes with negative difference
57 usad8 r5, r6, lr ; calculate sum of negative difference
    [all...]
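
usub8 subtracts four bytes in parallel and sets one GE flag per byte; the two sel instructions (with lr holding zero) then pick out the positive and the negative differences separately, and usad8 sums their absolute values. The _v and _hv variants below repeat the same pattern. A scalar C equivalent of the net effect:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sum of absolute differences over one 4-byte word, the net
       result of the usub8/sel/usad8 sequence above. */
    uint32_t sad4(uint32_t a, uint32_t b)
    {
        uint32_t sum = 0;
        for (int i = 0; i < 4; i++) {
            int da = (a >> (8 * i)) & 0xFF;   /* per-byte lane */
            int db = (b >> (8 * i)) & 0xFF;
            sum += abs(da - db);
        }
        return sum;
    }
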
vp8_variance_halfpixvar16x16_v_armv6.asm 41 ldr r6, [r9, #0] ; load 4 src pixels from next row
45 mvn r6, r6
46 uhsub8 r4, r4, r6
49 usub8 r6, r4, r5 ; calculate difference
51 sel r7, r6, lr ; select bytes with positive difference
52 usub8 r6, r5, r4 ; calculate difference with reversed operands
54 sel r6, r6, lr ; select bytes with negative difference
58 usad8 r5, r6, lr ; calculate sum of negative difference
    [all...]
dc_only_idct_add_v6.asm 30 ldr r6, [r1], r2
37 uxtab16 r7, r0, r6
38 uxtab16 r6, r0, r6, ror #8
42 usat16 r6, #8, r6
44 orr r7, r7, r6, lsl #8
47 ldr r6, [r1]
52 uxtab16 r7, r0, r6
53 uxtab16 r6, r0, r6, ror #8
    [all...]
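
uxtab16 adds the DC term to the two even bytes of the word (and, with ror #8, to the two odd ones), usat16 clamps each 16-bit lane to 8 bits, and the orr ... lsl #8 re-interleaves the lanes. A scalar sketch of the same DC-only IDCT add:

    #include <stdint.h>

    uint8_t clamp8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* Add a DC value to four packed pixels with saturation, as the
       uxtab16/usat16/orr sequence does. */
    uint32_t dc_add4(uint32_t pix, int dc)
    {
        uint32_t out = 0;
        for (int i = 0; i < 4; i++) {
            int p = (pix >> (8 * i)) & 0xFF;
            out |= (uint32_t)clamp8(p + dc) << (8 * i);
        }
        return out;
    }
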
dequant_idct_v6.asm 32 smulbb r6, r4, r5
38 strh r6, [r0], #2
41 smulbb r6, r4, r5
49 strh r6, [r0], #2
60 ldr r6, [r0, #8]
65 smulwt r9, r3, r6
66 smulwb r7, r3, r6
67 smulwt r10, r4, r6
68 smulwb r8, r4, r6
72 uadd16 r6, r6, r
    [all...]
vp8_variance_halfpixvar16x16_hv_armv6.asm 41 ldr r6, [r0, #1] ; load source pixels b, row N
46 mvn r6, r6
47 uhsub8 r4, r4, r6
59 usub8 r6, r4, r5 ; calculate difference
61 sel r7, r6, lr ; select bytes with positive difference
62 usub8 r6, r5, r4 ; calculate difference with reversed operands
64 sel r6, r6, lr ; select bytes with negative difference
68 usad8 r5, r6, lr ; calculate sum of negative difference
    [all...]
loopfilter_v6.asm 68 ldr r6, [sp, #36] ; load thresh address
77 ldrb r3, [r6] ; thresh
90 uqsub8 r6, r9, r10 ; p3 - p2
95 orr r6, r6, r7 ; abs (p3-p2)
97 uqsub8 lr, r6, r2 ; compare to limit. lr: vp8_filter_mask
99 uqsub8 r6, r11, r12 ; p1 - p0
104 orr r6, r6, r7 ; abs (p1-p0)
105 uqsub8 r7, r6, r2 ; compare to limit
    [all...]
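
uqsub8 saturates at zero, so uqsub8(p3, p2) keeps only the positive byte differences and uqsub8(p2, p3) only the negative ones; OR-ing the two gives abs(p3 - p2) per byte, and one more uqsub8 against the limit yields the filter mask (nonzero where the limit is exceeded). Per byte, in C:

    #include <stdint.h>

    uint8_t uqsub8(uint8_t a, uint8_t b) { return a > b ? a - b : 0; }

    /* abs(p3 - p2) via two saturating subtracts, as in the
       uqsub8/uqsub8/orr sequence above; mask step included. */
    uint8_t filter_mask_byte(uint8_t p3, uint8_t p2, uint8_t limit)
    {
        uint8_t absdiff = uqsub8(p3, p2) | uqsub8(p2, p3);
        return uqsub8(absdiff, limit);   /* nonzero => over the limit */
    }
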
  /bionic/libc/arch-arm/bionic/
syscall.S 34 stmfd sp!, {r4, r5, r6, r7}
39 ldmfd ip, {r3, r4, r5, r6}
41 ldmfd sp!, {r4, r5, r6, r7}
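
The stub saves r4-r7 (callee-saved; r7 also carries the syscall number in the ARM EABI convention) and loads the remaining arguments from the caller's stack into r3-r6 before trapping into the kernel. From C it is reached through syscall(2); a minimal usage example:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <stdio.h>

    int main(void)
    {
        /* Indirect system call: the assembly stub above marshals the
           number into r7 and the arguments into r0-r6. */
        long tid = syscall(__NR_gettid);
        printf("tid = %ld\n", tid);
        return 0;
    }
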
  /external/libvpx/libvpx/vp9/common/arm/neon/
vp9_avg_neon.asm 19 push {r4-r6, lr}
21 mov r6, r2
39 vld1.8 {q8-q9}, [r6@128]!
40 vld1.8 {q10-q11}, [r6@128], r4
49 pop {r4-r6, pc}
54 vld1.8 {q8-q9}, [r6@128], r3
55 vld1.8 {q10-q11}, [r6@128], r3
60 pld [r6]
62 pld [r6, r3]
68 pop {r4-r6, pc}
    [all...]
  /external/valgrind/main/none/tests/x86/
incdec_alt.c 7 int r1,r2,r3,r4,r5,r6,r7,r8,a1,a2; variable
40 "\tpopl " VG_SYM(r6) "\n"
58 r1=r2=r3=r4=r5=r6=r7=r8=0;
65 printf("0x%08x\n",r6);
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
cor_h_vec_neon.s 32 @r6 ---- cor_2[]
49 MOV r6, #0 @L_sum2 = 0
60 MLA r6, r12, r14, r6
64 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
68 ADD r10, r6, r14
71 MOV r6, r10, ASR #16
77 MUL r14, r6, r11
79 MOV r6, r14, ASR #1
    [all...]
Norm_Corr_neon.s 32 @ r6 --- corr_norm[]
98 QADD r6, r9, r10
99 QADD r6, r6, r6
100 QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
102 SUB r6, r7, #1 @exp = norm_l(L_tmp)
103 RSB r7, r6, #32 @exp = 32 - exp
104 MOV r6, r7, ASR #1
105 RSB r7, r6, #0 @scale = -(exp >> 1)
    [all...]
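
norm_l returns the number of left shifts needed to normalize a 32-bit value to Q31; the code then computes exp = 32 - exp and scale = -(exp >> 1), per the comments. A C sketch, assuming a CLZ-based norm_l for positive inputs (helper names hypothetical):

    #include <stdint.h>

    /* norm_l for positive values: shifts to bring the MSB to bit 30. */
    int norm_l(int32_t L_var)
    {
        return L_var ? __builtin_clz((uint32_t)L_var) - 1 : 0;
    }

    int scale_of(int32_t L_tmp)
    {
        int exp = norm_l(L_tmp);     /* SUB r6, r7, #1  @exp = norm_l  */
        exp = 32 - exp;              /* RSB r7, r6, #32 @exp = 32 - exp */
        return -(exp >> 1);          /* ASR #1, then RSB r7, r6, #0     */
    }
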
  /external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/lc3b/tests/
lc3b-basic.asm 1 add r7, r6, r5
  /external/jpeg/
armv6_idct.S 77 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
98 ldrsh r6, [r14, #96]
108 orreqs r8, r6, r7
115 // r6 = q[6] * c[6] + r2;
118 mla r6, r11, r6, r2
124 // r2 = r2 * 2 - r6 = -(r6 - r2 * 2);
126 rsb r2, r6, r2, lsl #1
160 // r0 = r0 + r6;
    [all...]
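
The comments spell out the trick: a single MLA forms q[6] * c[6] + r2, and the follow-up r2 = r2 * 2 - r6 is folded into one RSB by shifting the second operand inline. In C:

    #include <stdint.h>

    /* Butterfly step from the comments above. */
    void butterfly(int32_t *r2, int32_t *r6, int32_t q6, int32_t c6)
    {
        *r6 = q6 * c6 + *r2;      /* mla r6, r11, r6, r2    */
        *r2 = (*r2 << 1) - *r6;   /* rsb r2, r6, r2, lsl #1 */
    }
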
  /external/qemu/distrib/jpeg-6b/
armv6_idct.S 77 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
98 ldrsh r6, [r14, #96]
108 orreqs r8, r6, r7
115 // r6 = q[6] * c[6] + r2;
118 mla r6, r11, r6, r2
124 // r2 = r2 * 2 - r6 = -(r6 - r2 * 2);
126 rsb r2, r6, r2, lsl #1
160 // r0 = r0 + r6;
    [all...]
  /frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/
idct.cpp 131 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ local
155 r6 = blk[B_SIZE * 5 + i];
158 if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))
187 r8 = W3 * (r6 + r7);
188 r6 = (r8 - (W3 - W5) * r6);
199 r1 = r4 + r6;
200 r4 -= r6;
201 r6 = r5 + r7;
221 tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8
353 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ local
    [all...]
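
The r8 lines show the classic shared-multiply rotation: computing r8 = W3 * (r6 + r7) once lets both W3*r7 + W5*r6 and W3*r6 - W5*r7 be finished with one extra multiply each, three multiplies in total instead of four. As a standalone C helper:

    #include <stdint.h>

    /* Rotation by (W3, W5) with three multiplies, as in
       "r8 = W3 * (r6 + r7); r6 = r8 - (W3 - W5) * r6;". */
    void rotate(int32_t *x, int32_t *y, int32_t W3, int32_t W5)
    {
        int32_t t = W3 * (*x + *y);
        int32_t nx = t - (W3 - W5) * *x;   /* = W3 * *y + W5 * *x */
        int32_t ny = t - (W3 + W5) * *y;   /* = W3 * *x - W5 * *y */
        *x = nx;
        *y = ny;
    }
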
  /frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_polyphase_filter_window_arm.s 71 ldr r6,[r12,#0x780]
73 smlal r2,r11,lr,r6
77 smull r6,r5,r2,r6
82 smlal r6,r9,r5,r7
83 smull r6,r2,r5,r8
90 ldr r6,[r12,#0x680]
92 smlal lr,r11,r2,r6
96 smull r6,r5,r2,r6
    [all...]
pvmp3_polyphase_filter_window_gcc.s 71 ldr r6,[r12,#0x780]
73 smlal r2,r11,lr,r6
77 smull r6,r5,r2,r6
82 smlal r6,r9,r5,r7
83 smull r6,r2,r5,r8
90 ldr r6,[r12,#0x680]
92 smlal lr,r11,r2,r6
96 smull r6,r5,r2,r6
    [all...]
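
smull/smlal keep the full 64-bit products of the 32-bit window coefficients and samples, accumulating across register pairs such as r6:r5. The C shape of the same arithmetic:

    #include <stdint.h>

    /* 32x32 -> 64-bit multiply-accumulate (smlal rLo, rHi, a, b). */
    int64_t mac64(int64_t acc, int32_t coeff, int32_t sample)
    {
        return acc + (int64_t)coeff * sample;
    }
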
  /frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/
band_nrg_v5.s 51 ldr r6, [r0, +r10, lsl #2]
54 smull r6, r8, r6, r6
59 ldr r6, [r0, +r10, lsl #2]
61 smull r6, r8, r6, r6
99 mov r6, #0
129 qadd r6, r6, r
    [all...]
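
smull r6, r8, r6, r6 squares a 32-bit spectral value into a 64-bit result (low word in r6, high word in r8); the band energy is the running sum of such squares. A sketch with assumed band bounds:

    #include <stdint.h>

    /* Band energy as a sum of 64-bit squares ("smull r6, r8, r6, r6"). */
    int64_t band_energy(const int32_t *spec, int start, int end)
    {
        int64_t e = 0;
        for (int i = start; i < end; i++)
            e += (int64_t)spec[i] * spec[i];
        return e;
    }
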

