HomeSort by relevance Sort by last modified time
    Searched refs:ASR (Results 1 - 25 of 145) sorted by null

1 2 3 4 5 6

  /external/aac/libSBRdec/src/arm/
env_calc_arm.cpp 118 MOVS r3, r3, ASR #1
126 EOR r4, r4, r4, ASR #31
127 EOR r5, r5, r5, ASR #31
134 EOR r4, r4, r4, ASR #31
135 EOR r5, r5, r5, ASR #31
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
scale_sig_opt.s 46 TEQ r5, r12, ASR r10
47 EORNE r12, r8, r5, ASR #31
50 MOV r12, r11, ASR #16
59 MOV r5, r6, ASR r7 @L_tmp >>= exp
61 MOV r12, r11, ASR #16
cor_h_vec_opt.s 69 MOV r5, r9, ASR #16
70 MOV r6, r10, ASR #16
77 MOV r5, r12, ASR #15
78 MOV r6, r14, ASR #15
116 MOV r5, r9, ASR #16
117 MOV r6, r10, ASR #16
124 MOV r5, r12, ASR #15
125 MOV r6, r14, ASR #15
Deemph_32_opt.s 45 MOV r8, r5, ASR #1 @fac = mu >> 1
55 MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
66 MOV r14, r10, ASR #16 @y[1] = round(L_tmp)
81 MOV r14, r10, ASR #16
93 MOV r14, r10, ASR #16
Filt_6k_7k_opt.s 58 MOV r8, r8, ASR #2
59 MOV r9, r9, ASR #2
62 MOV r11, r11, ASR #2
63 MOV r12, r12, ASR #2
70 MOV r8, r8, ASR #2
71 MOV r9, r9, ASR #2
74 MOV r11, r11, ASR #2
75 MOV r12, r12, ASR #2
169 MOV r1, r14, ASR #15
Norm_Corr_opt.s 96 MOV r6, r7, ASR #1
140 MOV r11, r5, ASR #16 @corr = extract_h(L_tmp)
168 MOV r6, r6, ASR #16 @norm = extract_h(L_tmp)
177 MOVLT r12, r12, ASR r6
181 MOV r12, r12, ASR #16 @vo_round(L_tmp)
209 MOV r8, r14, ASR #15
218 MOV r8, r14, ASR #15
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/arm/
addthumb2err.s 11 add sp, sp, r0, ASR #3
16 adds sp, sp, r0, ASR #3
21 sub sp, sp, r0, ASR #3
26 subs sp, sp, r0, ASR #3
addthumb2err.l 4 [^:]*:11: Error: only LSL shift allowed in thumb mode -- `add sp,sp,r0,ASR#3'
9 [^:]*:16: Error: only LSL shift allowed in thumb mode -- `adds sp,sp,r0,ASR#3'
14 [^:]*:21: Error: only LSL shift allowed in thumb mode -- `sub sp,sp,r0,ASR#3'
19 [^:]*:26: Error: only LSL shift allowed in thumb mode -- `subs sp,sp,r0,ASR#3'
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/vc/m4p10/src/
armVCM4P10_TransformResidual4x4_s.s 185 PKHTB trRow10,in10,in00,ASR #16 ;// [5 4] = [f5:f1]
195 PKHTB trRow30,in12,in02,ASR #16 ;// [13 12] = [7 3]
205 PKHTB trRow12,in30,in20,ASR #16 ;// [7 6] = [f13:f9]
216 PKHTB trRow32,in32,in22,ASR #16 ;// [15 14] = [15 11]
265 PKHTB trCol10,rowOp10,rowOp00,ASR #16 ;// [5 4] = [f5:f1]
275 PKHTB trCol30,rowOp12,rowOp02,ASR #16 ;// [13 12] = [7 3]
285 PKHTB trCol12,rowOp30,rowOp20,ASR #16 ;// [7 6] = [f13:f9]
296 PKHTB trCol32,rowOp32,rowOp22,ASR #16 ;// [15 14] = [15 11]
356 AND colOp00, mask, colOp00, ASR #6
357 AND colOp02, mask, colOp02, ASR #6
    [all...]
omxVCM4P10_TransformDequantLumaDCFromPair_s.s 195 PKHTB trRow10,in10,in00,ASR #16 ;// [5 4] = [f5:f1]
205 PKHTB trRow30,in12,in02,ASR #16 ;// [13 12] = [7 3]
215 PKHTB trRow12,in30,in20,ASR #16 ;// [7 6] = [f13:f9]
226 PKHTB trRow32,in32,in22,ASR #16 ;// [15 14] = [15 11]
272 PKHTB trCol10,rowOp10,rowOp00,ASR #16 ;// [5 4] = [f5:f1]
282 PKHTB trCol30,rowOp12,rowOp02,ASR #16 ;// [13 12] = [7 3]
292 PKHTB trCol12,rowOp30,rowOp20,ASR #16 ;// [7 6] = [f13:f9]
303 PKHTB trCol32,rowOp32,rowOp22,ASR #16 ;// [15 14] = [15 11]
370 ASR temp1, temp1, #2 ;// Temp1 = Temp1 >> 2
371 ASR temp3, temp3, #2 ;// Temp3 = Temp3 >> 2
    [all...]
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s 135 PKHTB ValD, ValD, ValA, ASR #16 ;// [d1 c1 d0 c0]
140 PKHTB ValH, ValH, ValE, ASR #16 ;// [h1 g1 h0 g0]
194 PKHTB Acc0, Acc0, Accx, ASR #16 ;//[0 a3 0 a1] = [0 a1 0 a0] [0 a3 0 a2]
198 PKHTB Acc1, Acc1, Accy, ASR #16 ;//[0 b3 0 b1] = [0 b1 0 b0] [0 b3 0 b2]
204 PKHTB Acc2, Acc2, Accx, ASR #16 ;//[0 c3 0 c1] = [0 c1 0 c0] [0 c3 0 c2]
208 PKHTB Acc3, Acc3, Accy, ASR #16 ;//[0 d3 0 d1] = [0 d1 0 d0] [0 d3 0 d2]
omxVCM4P10_TransformDequantChromaDCFromPair_s.s 129 MOV Temp2, Temp2, ASR #1 ;// Temp2 = Temp2 >> 1 & Temp1 = (Temp1 >> 1) << 16
130 MOV Temp4, Temp4, ASR #1 ;// Temp4 = Temp4 >> 1 & Temp3 = (Temp3 >> 1) << 16
  /external/tremolo/Tremolo/
mdctLARM.s 63 MOV r5, r5, ASR #9 @ r5 = (*--r)>>9
64 MOV r6, r6, ASR #9 @ r6 = (*--r)>>9
65 MOV r7, r7, ASR #9 @ r7 = (*--r)>>9
66 MOV r12,r12,ASR #9 @ r12= (*--r)>>9
68 MOV r14,r12,ASR #15
69 TEQ r14,r14,ASR #31 @ if r14==0 || r14==-1 then in range
70 EORNE r12,r4, r14,ASR #31
73 MOV r14,r7, ASR #15
74 TEQ r14,r14,ASR #31 @ if r14==0 || r14==-1 then in range
75 EORNE r7, r4, r14,ASR #31
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
cor_h_vec_neon.s 70 MOV r5, r9, ASR #16
71 MOV r6, r10, ASR #16
78 MOV r5, r12, ASR #15
79 MOV r6, r14, ASR #15
117 MOV r5, r9, ASR #16
118 MOV r6, r10, ASR #16
125 MOV r5, r12, ASR #15
126 MOV r6, r14, ASR #15
Deemph_32_neon.s 45 MOV r8, r5, ASR #1 @fac = mu >> 1
55 MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
66 MOV r14, r10, ASR #16 @y[1] = round(L_tmp)
81 MOV r14, r10, ASR #16
93 MOV r14, r10, ASR #16
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/bfin/
vector2.s 22 //Dreg = VIT_MAX ( Dreg , Dreg ) (ASR) ; /* shift history bits right (b) */
25 //Dreg_lo = VIT_MAX ( Dreg ) (ASR) ; /* shift history bits right (b) */
27 r7 = vit_max (r1, r0) (asr) ; /* shift right, dual operation */
30 r3 = vit_max (r4, r5) (asr) ; /* shift right, dual operation */
32 r1 = vit_max (r2, r3) (asr) ; /* shift right, dual operation */
34 r7 = vit_max (r0, r1) (asr) ; /* shift right, dual operation */
36 r5 = vit_max (r6, r7) (asr) ; /* shift right, dual operation */
40 r3.l = vit_max (r1)(asr) ; /* shift right, single operation */
43 r2.l = vit_max (r3)(asr) ; /* shift right, single operation */
45 r6.l = vit_max (r7)(asr) ; /* shift right, single operation */
    [all...]
bit.d 34 2a: 08 c6 08 00 BITMUX \(R1, R0, A0\) \(ASR\);
35 2e: 08 c6 13 00 BITMUX \(R2, R3, A0\) \(ASR\);
vector2.d 17 24: 09 c6 01 ce R7 = VIT_MAX \(R1, R0\) \(ASR\);
19 2c: 09 c6 2c c6 R3 = VIT_MAX \(R4, R5\) \(ASR\);
21 34: 09 c6 1a c2 R1 = VIT_MAX \(R2, R3\) \(ASR\);
23 3c: 09 c6 08 ce R7 = VIT_MAX \(R0, R1\) \(ASR\);
25 44: 09 c6 3e ca R5 = VIT_MAX \(R6, R7\) \(ASR\);
27 4c: 09 c6 01 46 R3.L = VIT_MAX \(R1\) \(ASR\);
29 54: 09 c6 03 44 R2.L = VIT_MAX \(R3\) \(ASR\);
31 5c: 09 c6 07 4c R6.L = VIT_MAX \(R7\) \(ASR\);
33 64: 09 c6 04 46 R3.L = VIT_MAX \(R4\) \(ASR\);
35 6c: 09 c6 00 4e R7.L = VIT_MAX \(R0\) \(ASR\);
    [all...]
bit2.s 81 //BITMUX ( Dreg , Dreg , A0 ) (ASR) ; /* shift right, LSB is shifted out (b) */
82 BITMUX(R0, R1, A0)(ASR);
83 BITMUX(R0, R2, A0)(ASR);
84 BITMUX(R1, R3, A0)(ASR);
85 //BITMUX(R0, R0, A0)(ASR);
bit.s 44 BITMUX(R1, R0, A0) (ASR);
45 Bitmux (r2, R3, a0) (aSr);
  /external/sonivox/arm-wt-22k/lib_src/
ARM-E_voice_gain_gnu.s 96 MOV gainLeft, gainLeft, ASR #(NUM_MIXER_GUARD_BITS - 2)
97 MOV gainIncLeft, gainIncLeft, ASR #(SYNTH_UPDATE_PERIOD_IN_BITS + NUM_MIXER_GUARD_BITS - 2)
104 MOV gainRight, gainRight, ASR #(NUM_MIXER_GUARD_BITS - 2)
105 MOV gainIncRight, gainIncRight, ASR #(SYNTH_UPDATE_PERIOD_IN_BITS + NUM_MIXER_GUARD_BITS - 2)
142 MOV gainIncrement, gainIncrement, ASR #SYNTH_UPDATE_PERIOD_IN_BITS
152 MOV tmp0, tmp0, ASR #1 @ add 6dB headroom
ARM-E_filter_gnu.s 81 MOV b2, b2, ASR #1 @ b2 = b2 >> 1
82 MOV K, K, ASR #1 @ K = K >> 1
100 MOV z1, tmp0, ASR #14 @ shift result to low word
113 MOV z1, tmp1, ASR #14 @ shift result to low word
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/
armCOMM_IDCT_s.h 250 MOV xi3, xi3, ASR #SHIFT
255 MOV xi0, xi0, ASR #SHIFT
259 MOV xi1, xi1, ASR #SHIFT
261 MOV xi2, xi2, ASR #SHIFT
275 MOV xi0, xi0, ASR #SHIFT
277 MOV xi2, xi2, ASR #SHIFT
287 MOV xi0, xi0, ASR #SHIFT
289 MOV xi2, xi2, ASR #SHIFT
321 MOV xi7, xi7, ASR #SHIFT
326 MOV xi4, xi4, ASR #SHIFT
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/
armCOMM_IDCT_s.h 250 MOV xi3, xi3, ASR #SHIFT
255 MOV xi0, xi0, ASR #SHIFT
259 MOV xi1, xi1, ASR #SHIFT
261 MOV xi2, xi2, ASR #SHIFT
275 MOV xi0, xi0, ASR #SHIFT
277 MOV xi2, xi2, ASR #SHIFT
287 MOV xi0, xi0, ASR #SHIFT
289 MOV xi2, xi2, ASR #SHIFT
321 MOV xi7, xi7, ASR #SHIFT
326 MOV xi4, xi4, ASR #SHIFT
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrnb/common/include/
mult.h 118 MOV product, product, ASR #15
141 asm volatile("mov %0, %1, ASR #15"

Completed in 1182 milliseconds

1 2 3 4 5 6