/external/libxaac/decoder/armv7/
  ia_xheaacd_mps_mulshift.s
    35   VQDMULL.S32 Q6, D1, D5
    37   VUZP.32 Q4, Q6
    39   VST1.32 {Q6, Q7}, [R2]!    @Storing values to R2
  ixheaacd_mps_synt_post_fft_twiddle.s
    45   VMULL.S32 Q6, D1, D5
    50   VSHRN.S64 D12, Q6, #31
  ixheaacd_mps_synt_out_calc.s
    32   VMULL.S32 Q6, D2, D5
    36   VSHRN.S64 D12, Q6, #31
  ixheaacd_tns_ar_filter_fixed.s
    164  VEXT.32 Q6, Q7, Q6, #3
    186  VEXT.32 Q11, Q6, Q11, #3
    203  VEXT.32 Q6, Q7, Q6, #3
    234  VEXT.32 Q11, Q6, Q11, #3
    250  VEXT.32 Q6, Q7, Q6, #3
    281  VEXT.32 Q11, Q6, Q11, #3
    297  VEXT.32 Q6, Q7, Q6, # [all...]
  ixheaacd_calc_post_twid.s
    45   VMULL.S32 Q6, D6, D0
    54   VSHRN.S64 D10, Q6, #32
  ixheaacd_calc_pre_twid.s
    49   VMULL.S32 Q6, D0, D5
    59   VSHRN.S64 D12, Q6, #32
  ixheaacd_dec_DCT2_64_asm.s
    135  VSUB.I32 Q6, Q8, Q15
    137  VREV64.32 Q6, Q6
    150  VST2.32 {Q6, Q7}, [R12]
    186  VSUB.I32 Q6, Q8, Q15
    189  VREV64.32 Q6, Q6
    205  VST2.32 {Q6, Q7}, [R12]
    239  VSUB.I32 Q6, Q8, Q15
    241  VREV64.32 Q6, Q [all...]
  ixheaacd_esbr_cos_sin_mod_loop2.s
    75   vadd.I64 q6, q4, q3
    79   VSHRN.I64 D12, Q6, #32
    106  vadd.I64 q6, q4, q3
    110  VSHRN.I64 D12, Q6, #32
    135  vadd.I64 q6, q2, q5
    139  VSHRN.I64 D12, Q6, #32
  ia_xheaacd_mps_reoder_mulshift_acc.s
    62   VLD1.32 {Q6, Q7}, [R3]!    @LOADING values from R3 N.imag_fix
    106  VLD1.32 {Q6, Q7}, [R3]!    @LOADING values from R3 N.imag_fix
    150  VLD1.32 {Q6, Q7}, [R3]!    @LOADING values from R3 N.imag_fix
    194  VLD1.32 {Q6, Q7}, [R3]!    @LOADING values from R3 N.imag_fix
  ixheaacd_esbr_fwd_modulation.s
    54   VADD.S32 Q6, Q0, Q2
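Note: the recurring VMULL.S32 + VSHRN.S64 #31/#32 pairs in the libxaac routines above (and the saturating VQDMULL.S32 variant) are 32x32-bit fractional fixed-point multiplies: widen to a 64-bit product, shift right, narrow back to 32 bits. A minimal scalar sketch of that per-lane operation (hypothetical helper name, not taken from these files):

    #include <stdint.h>

    /* Scalar model of VMULL.S32 followed by VSHRN.S64 #shift:
     * 32x32 -> 64-bit product, then keep the low 32 bits of (product >> shift). */
    static inline int32_t mul_shift32(int32_t a, int32_t b, unsigned shift)
    {
        int64_t prod = (int64_t)a * (int64_t)b;
        return (int32_t)(prod >> shift);   /* shift is 31 or 32 in the listings above */
    }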
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
  Filt_6k_7k_neon.s
    66   VLD1.S16 {Q6, Q7}, [r7]!    @ signal[48] ~ signal[63]
    75   VSHR.S16 Q10, Q6, #2
    96   VLD1.S16 {Q6, Q7}, [r4]!    @x[16] ~ X[31]
    111  VEXT.8 Q5,Q5,Q6,#2
    116  VEXT.8 Q6,Q6,Q7,#2
    133  VEXT.8 Q5,Q5,Q6,#2
    138  VEXT.8 Q6,Q6,Q7,#2
    155  VEXT.8 Q5,Q5,Q6,# [all...]
  Dot_p_neon.s
    42   VLD1.S16 {Q6, Q7}, [r0]!    @load 16 Word16 x[]
    79   VLD1.S16 {Q6, Q7}, [r0]!
  Norm_Corr_neon.s
    76   VLD1.S16 {Q6, Q7}, [r14]!
    119  VLD1.S16 {Q6, Q7}, [r12]!    @ load 16 x[]
    140  VLD1.S16 {Q6, Q7}, [r12]!    @ load 16 x[]
  pred_lt4_1_neon.s
    57   VLD1.S16 {Q6, Q7}, [r4]!    @load 16 x[]
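Note: the VLD1.S16 {Q6, Q7} loads in these amrwbenc kernels pull 16 Word16 samples per iteration to feed correlation/dot-product style loops. A generic scalar view of that accumulation (the real routines add their own scaling and normalisation, which is omitted here):

    #include <stddef.h>
    #include <stdint.h>

    /* 16-bit dot product with a wide accumulator; one NEON iteration above
     * covers 16 of these products. */
    static int64_t dot_word16(const int16_t *x, const int16_t *y, size_t n)
    {
        int64_t acc = 0;
        for (size_t i = 0; i < n; i++)
            acc += (int32_t)x[i] * (int32_t)y[i];
        return acc;
    }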
/external/libhevc/decoder/arm/ |
  ihevcd_fmt_conv_420sp_to_rgba8888.s
    198  VMULL.S16 Q6,D4,D0[1]     @//(U-128)*C2 FOR G
    199  VMLAL.S16 Q6,D6,D0[2]     @//Q6 = (U-128)*C2 + (V-128)*C3
    214  VQSHRN.S32 D12,Q6,#13     @//D12 = [(U-128)*C2 + (V-128)*C3]>>13 4 16-BIT VALUES
    216  @//Q6 - WEIGHT FOR G
    220  VADDW.U8 Q9,Q6,D30        @//Q9 - HAS Y + G
    224  VADDW.U8 Q12,Q6,D31       @//Q12 - HAS Y + G
    261  VADDW.U8 Q9,Q6,D28        @//Q3 - HAS Y + G
    265  VADDW.U8 Q12,Q6,D29       @//Q12 - HAS Y + G
    329  VMULL.S16 Q6,D4,D0[1]     @//(U-128)*C2 FOR [all...]
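Note: per the comments above, Q6 holds the chroma contribution to the G channel, (U-128)*C2 + (V-128)*C3, scaled down by 13 bits and then added to the luma samples. A scalar sketch of that step (coefficient arguments are placeholders, not the decoder's actual constants):

    #include <stdint.h>

    static uint8_t clip_u8(int32_t v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* G = clip(Y + (((U-128)*C2 + (V-128)*C3) >> 13)) */
    static uint8_t g_component(uint8_t y, uint8_t u, uint8_t v, int16_t c2, int16_t c3)
    {
        int32_t g_weight = (((int32_t)(u - 128) * c2) + ((int32_t)(v - 128) * c3)) >> 13;
        return clip_u8((int32_t)y + g_weight);
    }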
/external/libxaac/decoder/ |
  ixheaacd_constants.h
    32   #define Q6 64
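Note: 64 is 2^6, so this define reads as the Q6 fixed-point scale factor (1.0 in Q6 format); the interpretation, not the value, is an assumption here. A minimal usage sketch with hypothetical helpers:

    /* 1.5 in Q6 is 96; converting back divides by the same scale. */
    #define Q6 64

    static int   float_to_q6(float x) { return (int)(x * Q6 + (x >= 0 ? 0.5f : -0.5f)); }
    static float q6_to_float(int q)   { return (float)q / Q6; }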
/external/libhevc/common/arm/ |
  ihevc_sao_edge_offset_class0_chroma.s
    170  VEXT.8 Q7,Q7,Q6,#14    @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
    174  VCGT.U8 Q8,Q6,Q7       @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    178  VCLT.U8 Q9,Q6,Q7       @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    198  VEXT.8 Q7,Q6,Q7,#2     @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
    202  VCGT.U8 Q8,Q6,Q7       @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    205  VCLT.U8 Q9,Q6,Q7       @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    234  VMOVL.U8 Q6,D13        @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    244  VADDW.S8 Q6,Q6,D17     @pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    246  VMAX.S16 Q6,Q6,Q2      @pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_cli (…) [all...]
  ihevc_sao_edge_offset_class1.s
    142  VCGT.U8 Q6,Q5,Q4       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    147  VSUB.U8 Q8,Q7,Q6       @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    157  VCGT.U8 Q6,Q5,Q9       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    165  VSUB.U8 Q10,Q7,Q6      @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    168  VADD.I8 Q6,Q0,Q8       @edge_idx = vaddq_s8(const_2, sign_up)
    171  VADD.I8 Q6,Q6,Q10      @edge_idx = vaddq_s8(edge_idx, sign_down)
    236  VCGT.U8 Q6,Q5,Q9       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    238  VSUB.U8 Q10,Q7,Q6      @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    287  VCGT.U8 Q6,Q5,Q4       @vcgtq_u8(pu1_cur_row, pu1_top_row [all...]
  ihevc_sao_edge_offset_class1_chroma.s
    146  VCGT.U8 Q6,Q5,Q14      @vcgtq_u8(pu1_cur_row, pu1_top_row)
    151  VSUB.U8 Q8,Q7,Q6       @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    161  VCGT.U8 Q6,Q5,Q9       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    169  VSUB.U8 Q10,Q7,Q6      @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    172  VADD.I8 Q6,Q0,Q8       @edge_idx = vaddq_s8(const_2, sign_up)
    175  VADD.I8 Q6,Q6,Q10      @edge_idx = vaddq_s8(edge_idx, sign_down)
    248  VCGT.U8 Q6,Q5,Q9       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    250  VSUB.U8 Q10,Q7,Q6      @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    304  VCGT.U8 Q6,Q5,Q14      @vcgtq_u8(pu1_cur_row, pu1_top_row [all...]
  ihevc_sao_edge_offset_class0.s
    163  VEXT.8 Q7,Q7,Q6,#15    @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
    167  VCGT.U8 Q8,Q6,Q7       @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    172  VCLT.U8 Q9,Q6,Q7       @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    187  VEXT.8 Q7,Q6,Q7,#1     @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
    195  VCGT.U8 Q8,Q6,Q7       @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    198  VCLT.U8 Q9,Q6,Q7       @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    305  VEXT.8 Q7,Q7,Q6,#15    @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
    307  VCGT.U8 Q8,Q6,Q7       @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    308  VCLT.U8 Q9,Q6,Q7       @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    313  VEXT.8 Q7,Q6,Q7,#1     @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1 [all...]
  ihevc_sao_edge_offset_class2.s
    257  VCGT.U8 Q7,Q6,Q5       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    261  VCLT.U8 Q8,Q6,Q5       @vcltq_u8(pu1_cur_row, pu1_top_row)
    302  VCGT.U8 Q5,Q6,Q9       @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    305  VCLT.U8 Q9,Q6,Q9       @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    326  VMOV Q6,Q8             @I pu1_cur_row = pu1_next_row
    369  VCGT.U8 Q12,Q6,Q11     @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    373  VCLT.U8 Q11,Q6,Q11     @II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    434  VMOV Q6,Q15            @III pu1_cur_row = pu1_next_row
    470  VCGT.U8 Q5,Q6,Q9       @vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    474  VCLT.U8 Q9,Q6,Q9       @vcltq_u8(pu1_cur_row, pu1_next_row_tmp [all...]
  ihevc_sao_edge_offset_class2_chroma.s
    350  VCGT.U8 Q7,Q6,Q5       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    354  VCLT.U8 Q8,Q6,Q5       @vcltq_u8(pu1_cur_row, pu1_top_row)
    412  VCGT.U8 Q10,Q6,Q9      @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    414  VCLT.U8 Q11,Q6,Q9      @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    436  VMOV Q6,Q8             @I pu1_cur_row = pu1_next_row
    486  VCGT.U8 Q11,Q6,Q14     @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    494  VCLT.U8 Q12,Q6,Q14     @II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    569  VMOV Q6,Q15            @III pu1_cur_row = pu1_next_row [all...]
  ihevc_sao_edge_offset_class3.s
    272  VCGT.U8 Q7,Q6,Q5       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    282  VCLT.U8 Q8,Q6,Q5       @vcltq_u8(pu1_cur_row, pu1_top_row)
    317  VCGT.U8 Q5,Q6,Q9       @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    318  VCLT.U8 Q9,Q6,Q9       @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    341  VMOV Q6,Q8
    394  VCGT.U8 Q12,Q6,Q9      @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    398  VCLT.U8 Q13,Q6,Q9      @II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    467  VMOV Q6,Q15            @II pu1_cur_row = pu1_next_row
    508  VCGT.U8 Q12,Q6,Q9      @vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    511  VCLT.U8 Q13,Q6,Q9      @vcltq_u8(pu1_cur_row, pu1_next_row_tmp [all...]
  ihevc_sao_edge_offset_class3_chroma.s
    340  VCGT.U8 Q7,Q6,Q5       @vcgtq_u8(pu1_cur_row, pu1_top_row)
    344  VCLT.U8 Q8,Q6,Q5       @vcltq_u8(pu1_cur_row, pu1_top_row)
    404  VCGT.U8 Q10,Q6,Q9      @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    406  VCLT.U8 Q11,Q6,Q9      @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    431  VMOV Q6,Q8             @I pu1_cur_row = pu1_next_row
    487  VCGT.U8 Q11,Q6,Q14     @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    497  VCLT.U8 Q12,Q6,Q14     @II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    574  VMOV Q6,Q15            @III pu1_cur_row = pu1_next_row [all...]
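Note: the SAO edge-offset kernels above use VCGT/VCLT to build all-ones/all-zero masks and VSUB to collapse them into a per-pixel sign, then form edge_idx = 2 + sign_up + sign_down, as the intrinsics in the comments spell out. A scalar restatement of that step:

    #include <stdint.h>

    /* vsub(cmp_lt, cmp_gt) on 0xFF/0x00 masks is the three-valued sign of (cur - nbr). */
    static int8_t sao_sign(uint8_t cur, uint8_t nbr)
    {
        return (cur > nbr) ? 1 : (cur < nbr) ? -1 : 0;
    }

    /* edge_idx = const_2 + sign_up + sign_down, per the comments above. */
    static int8_t sao_edge_idx(uint8_t cur, uint8_t nbr_a, uint8_t nbr_b)
    {
        return (int8_t)(2 + sao_sign(cur, nbr_a) + sao_sign(cur, nbr_b));
    }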
/external/llvm/lib/Target/AArch64/ |
  AArch64CallingConvention.h
    42   AArch64::Q6, AArch64::Q7};