/external/libhevc/decoder/arm/ |
ihevcd_fmt_conv_420sp_to_rgba8888.s | 219 VADDW.U8 Q8,Q5,D30 @//Q8 - HAS Y + R 228 VQMOVUN.S16 D16,Q8 233 VZIP.16 Q7,Q8 246 VZIP.32 Q8,Q11 260 VADDW.U8 Q8,Q5,D28 @//Q2 - HAS Y + R 279 VQMOVUN.S16 D16,Q8 284 VZIP.16 Q7,Q8 297 VZIP.32 Q8,Q11 350 VADDW.U8 Q8,Q5,D30 @//Q8 - HAS Y + [all...] |
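Note: the comments above describe the conversion step itself: a signed, precomputed chroma contribution (R, derived from V) sits in a 16-bit register, is added to the 8-bit luma with a widening add (VADDW.U8) and saturated back to 8 bits (VQMOVUN.S16) before the VZIPs interleave the channels into RGBA words. A minimal scalar sketch of that per-pixel step, assuming the contribution has already been computed by surrounding code not shown in the snippet:

    #include <stdint.h>

    /* Sketch only: one pixel of the "Y + R" step named in the comments.
     * r_contrib is the precomputed, signed chroma term (an assumption;
     * its computation is outside the quoted lines). */
    static inline uint8_t add_chroma_contrib(uint8_t y, int16_t r_contrib)
    {
        int32_t v = (int32_t)y + r_contrib;    /* VADDW.U8  Q8,Q5,D30 */
        if (v < 0)   v = 0;                    /* VQMOVUN.S16 D16,Q8  */
        if (v > 255) v = 255;
        return (uint8_t)v;
    }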
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
Filt_6k_7k_neon.s | 67 VLD1.S16 {Q8, Q9}, [r7]! @ signal[64] ~ signal[79] 77 VSHR.S16 Q2, Q8, #2 97 VLD1.S16 {Q8}, [r4]! 121 VEXT.8 Q7,Q7,Q8,#2 125 VEXT.8 Q8,Q8,Q15,#2 143 VEXT.8 Q7,Q7,Q8,#2 147 VEXT.8 Q8,Q8,Q15,#2 165 VEXT.8 Q7,Q7,Q8,# [all...] |
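Note: Filt_6k_7k is a band-pass FIR; the snippet shows its two building blocks: the input block is prescaled by an arithmetic right shift of 2 (VSHR.S16 #2), and the VEXT instructions slide the 16-bit sample window along by one sample (two bytes) per tap. A scalar sketch of that structure, where fir[] and L_FIR = 31 are illustrative assumptions rather than values read from the file:

    #include <stdint.h>

    #define L_FIR 31                 /* illustrative tap count */

    /* in[] must provide n + L_FIR - 1 samples; Q15 coefficients assumed. */
    static void fir_bandpass_sketch(const int16_t *in, int16_t *out, int n,
                                    const int16_t fir[L_FIR])
    {
        for (int i = 0; i < n; i++) {
            int32_t acc = 0;
            for (int k = 0; k < L_FIR; k++)
                acc += (int32_t)(in[i + k] >> 2) * fir[k];  /* VSHR.S16 #2 prescale */
            out[i] = (int16_t)(acc >> 15);
        }
    }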
scale_sig_neon.s | 64 VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[] 72 VSHLL.S16 Q8, D0, #16 76 VSHL.S32 Q8, Q8, Q14 80 VADDHN.S32 D16, Q8, Q15 84 VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[] 99 VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[] 113 VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[] 127 VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
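Note: scale_sig rescales a block of Word16 samples by 2^exp with rounding: each sample is widened into the top half of a 32-bit lane (VSHLL.S16 #16), shifted by the possibly negative exponent (VSHL.S32 by a register), then rounded and narrowed back to 16 bits by taking the high halves of a sum (VADDHN.S32, presumably against the same 0x8000 term that residu_asm_neon.s below loads explicitly). A scalar sketch of one sample, with saturation of large left shifts omitted:

    #include <stdint.h>

    /* Sketch: x * 2^exp with rounding, mirroring VSHLL #16 / VSHL / VADDHN. */
    static inline int16_t scale_word16_sketch(int16_t x, int exp)
    {
        int64_t v = (int64_t)x << 16;                /* VSHLL.S16 ..., #16  */
        v = (exp >= 0) ? (v << exp) : (v >> -exp);   /* VSHL.S32 ..., Q14   */
        return (int16_t)((v + 0x8000) >> 16);        /* VADDHN.S32 + 0x8000 */
    }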
residu_asm_neon.s | 38 VMOV.S32 Q8, #0x8000 113 VQADD.S32 Q10, Q10, Q8
Dot_p_neon.s | 43 VLD1.S16 {Q8, Q9}, [r1]! @load 16 Word16 y[]
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV7/ |
PrePostMDCT_v7.s | 47 VREV64.32 Q8, Q7 52 VQDMULH.S32 Q11, Q1, Q8 @ MULHIGH(sina, ti1) 53 VQDMULH.S32 Q12, Q0, Q8 @ MULHIGH(cosa, ti1) 109 VREV64.32 Q8, Q6 120 VQDMULH.S32 Q10, Q2, Q8 @ MULHIGH(cosb, tr2) 123 VQDMULH.S32 Q13, Q3, Q8 @ MULHIGH(sinb, tr2)
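Note: the MULHIGH(...) comments name the upper 32 bits of a 32x32-bit signed product; the pre/post-MDCT twiddle uses it to rotate a complex sample by (cosa, sina). VREV64.32 reverses the 32-bit lanes within each doubleword so the half of the buffer walked backwards pairs with the forward half, and VQDMULH.S32 supplies a doubled, saturated high half. A generic fixed-point rotation sketch, not the file's exact data layout or scaling:

    #include <stdint.h>

    /* Upper 32 bits of a signed 32x32 product (the MULHIGH of the comments). */
    static inline int32_t mulhigh(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    /* Rotate (re, im) by the angle whose cosine/sine are cosa/sina (Q31). */
    static inline void rotate_q31(int32_t *re, int32_t *im,
                                  int32_t cosa, int32_t sina)
    {
        int32_t r = *re, i = *im;
        *re = mulhigh(cosa, r) + mulhigh(sina, i);
        *im = mulhigh(cosa, i) - mulhigh(sina, r);
    }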
Radix4FFT_v7.s | 90 VADD.S32 Q8, Q10, Q11 @ MULHIGH(cosx, t0) + MULHIGH(sinx, t1) 104 VADD.S32 Q4, Q8, Q6 @ r4 = t0 + r6@ 106 VSUB.S32 Q6, Q8, Q6 @ r6 = t0 - r6@ 109 VADD.S32 Q8, Q0, Q5 @ xptr[0] = r0 + r5@ 118 VSUB.S32 Q8, Q0, Q5 @ xptr[0] = r0 - r5@
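Note: after the twiddle multiplies (the MULHIGH(cosx,.) + MULHIGH(sinx,.) sums), the remaining adds and subtracts are the combine stage of a radix-4 butterfly. A textbook version of that combine for one forward butterfly, ignoring the file's fixed-point scaling and register layout:

    #include <stdint.h>

    typedef struct { int32_t re, im; } cplx;

    static inline cplx c_add(cplx a, cplx b) { return (cplx){ a.re + b.re, a.im + b.im }; }
    static inline cplx c_sub(cplx a, cplx b) { return (cplx){ a.re - b.re, a.im - b.im }; }
    static inline cplx c_mulj(cplx a)        { return (cplx){ -a.im, a.re }; } /* multiply by j */

    /* One radix-4 DFT combine (forward transform, no scaling). */
    static void radix4_butterfly(cplx x[4])
    {
        cplx t0 = c_add(x[0], x[2]);
        cplx t1 = c_sub(x[0], x[2]);
        cplx t2 = c_add(x[1], x[3]);
        cplx t3 = c_sub(x[1], x[3]);

        x[0] = c_add(t0, t2);            /* X0 = x0 + x1 + x2 + x3      */
        x[1] = c_sub(t1, c_mulj(t3));    /* X1 = (x0 - x2) - j(x1 - x3) */
        x[2] = c_sub(t0, t2);            /* X2 = x0 - x1 + x2 - x3      */
        x[3] = c_add(t1, c_mulj(t3));    /* X3 = (x0 - x2) + j(x1 - x3) */
    }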
/external/libhevc/common/arm/ |
ihevc_sao_edge_offset_class1.s | 137 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 158 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 164 VNEG.S8 Q8,Q10 @sign_up = vnegq_s8(sign_down) 170 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 173 VNEG.S8 Q8,Q4 @II sign_up = vnegq_s8(sign_down) 231 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 279 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 300 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 306 VNEG.S8 Q8,Q10 @sign_up = vnegq_s8(sign_down) 310 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up [all...] |
ihevc_sao_edge_offset_class1_chroma.s | 142 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 163 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 169 VNEG.S8 Q8,Q10 @sign_up = vnegq_s8(sign_down) 175 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 179 VNEG.S8 Q8,Q14 @II sign_up = vnegq_s8(sign_down) 244 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 297 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 318 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 324 VNEG.S8 Q8,Q10 @sign_up = vnegq_s8(sign_down) 330 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up [all...] |
ihevc_sao_edge_offset_class0.s | 157 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 165 VSUB.I8 Q10,Q9,Q8 @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 185 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 192 VSUB.I8 Q11,Q9,Q8 @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 297 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 299 VSUB.I8 Q10,Q9,Q8 @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 305 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 307 VSUB.I8 Q11,Q9,Q8 @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
ihevc_sao_edge_offset_class0_chroma.s | 163 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 170 VSUB.U8 Q10,Q9,Q8 @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 191 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 197 VSUB.U8 Q11,Q9,Q8 @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 326 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 333 VSUB.U8 Q10,Q9,Q8 @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 351 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp) 358 VSUB.U8 Q11,Q9,Q8 @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
ihevc_sao_edge_offset_class2.s | 252 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 256 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 277 VEXT.8 Q9,Q8,Q9,#1 @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1) 317 VMOV Q6,Q8 @I pu1_cur_row = pu1_next_row 349 VEXT.8 Q11,Q8,Q14,#1 @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1) 375 VCGT.U8 Q5,Q8,Q9 @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) 381 VCLT.U8 Q9,Q8,Q9 @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp) 458 VEXT.8 Q9,Q8,Q9,#1 @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1) 564 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 568 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt) [all...] |
ihevc_sao_edge_offset_class2_chroma.s | 344 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 356 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 372 VEXT.8 Q9,Q8,Q9,#2 @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 2) 426 VMOV Q6,Q8 @I pu1_cur_row = pu1_next_row 460 VEXT.8 Q14,Q8,Q14,#2 @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 2) 527 VCGT.U8 Q10,Q8,Q9 @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) 529 VCLT.U8 Q11,Q8,Q9 @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp) 606 VEXT.8 Q9,Q8,Q9,#2 @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 2) [all...] |
ihevc_sao_edge_offset_class3.s | 273 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 276 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 292 VEXT.8 Q9,Q9,Q8,#15 @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15) 332 VMOV Q6,Q8 370 VEXT.8 Q9,Q9,Q8,#15 @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15) 410 VCGT.U8 Q5,Q8,Q9 @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) 414 VCLT.U8 Q9,Q8,Q9 @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp) 495 VEXT.8 Q9,Q9,Q8,#15 @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15) 602 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 603 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt) [all...] |
ihevc_sao_edge_offset_class3_chroma.s | 334 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) 338 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) 365 VEXT.8 Q9,Q9,Q8,#14 @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) 421 VMOV Q6,Q8 @I pu1_cur_row = pu1_next_row 460 VEXT.8 Q14,Q14,Q8,#14 @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) 517 VCGT.U8 Q11,Q8,Q9 @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) 525 VCLT.U8 Q10,Q8,Q9 @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp) 615 VEXT.8 Q9,Q9,Q8,#14 @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) [all...] |
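Note: every ihevc_sao_edge_offset_* file above repeats one classification idiom that the comments spell out: sign(cur - neighbour) is built from two unsigned compares (vcgtq_u8/vcltq_u8) whose all-ones masks are subtracted to give -1/0/+1, edge_idx = 2 + sign_up + sign_down selects the offset, and the next row reuses work because its sign_up is just the current sign_down negated (VNEG.S8). A scalar sketch of the classification for class 1 (vertical, above/below neighbours); the table-lookup remapping of edge_idx performed by the real code is assumed here to be folded into offsets[]:

    #include <stdint.h>

    /* (a > b) - (a < b): the cmp_lt - cmp_gt trick, giving sign(a - b). */
    static inline int sign3(int a, int b) { return (a > b) - (a < b); }

    /* One pixel of SAO class-1 edge offset; offsets[5] is derived from
     * sao_offset_val (assumption: edge_idx remapping folded in). */
    static inline uint8_t sao_edge_pixel(uint8_t above, uint8_t cur, uint8_t below,
                                         const int8_t offsets[5])
    {
        int edge_idx = 2 + sign3(cur, above) + sign3(cur, below);
        int v = cur + offsets[edge_idx];
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));   /* 8-bit clip */
    }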
/bionic/libm/upstream-freebsd/lib/msun/bsdsrc/ |
b_tgamma.c | 110 #define Q8 6.13275507472443958924745652239e-06 254 q = Q0 +z*(Q1+z*(Q2+z*(Q3+z*(Q4+z*(Q5+z*(Q6+z*(Q7+z*Q8)))))));
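Note: Q8 here is not a register but the last of nine coefficients Q0..Q8 of a polynomial used in tgamma()'s rational approximation; the quoted line evaluates that degree-8 polynomial in Horner form. An equivalent loop (only Q8's value appears in the snippet, so the coefficient array is left to the caller):

    #include <stddef.h>

    /* Horner evaluation of coef[0] + z*(coef[1] + ... + z*coef[n-1]). */
    static double poly_horner(const double coef[], size_t n, double z)
    {
        double q = coef[n - 1];
        for (size_t i = n - 1; i-- > 0; )
            q = coef[i] + z * q;
        return q;
    }

poly_horner(Q, 9, z) with Q = {Q0, ..., Q8} reproduces the quoted expression.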
/external/libavc/common/arm/ |
ih264_deblk_luma_a9.s | 115 vdup.8 q8, r3 @Q8 contains beta 123 vcge.u8 q12, q12, q8 @Q12=( ABS(q1 - q0) >= Beta ) 124 vcge.u8 q13, q13, q8 @Q13=( ABS(p1 - p0) >= Beta ) 125 vcgt.u8 q10, q8, q14 @Q10=(Ap<Beta) 126 vcgt.u8 q11, q8, q15 @Q11=(Aq<Beta) 140 vrhadd.u8 q8, q3, q0 @Q8 = ((p0+q0+1) >> 1) 153 vshll.u8 q8, d8, #1 @Q13,Q8 = (p1<<1 [all...] |
ih264_iquant_itrans_recon_a9.s | 206 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined 207 vadd.s16 q10, q7, q8 @x0 + x3 and x1+x2 combined 381 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined 383 vadd.s16 q10, q7, q8 @x0 + x3 and x1+x2 combined 506 vld1.32 {q8}, [r0]! @ Q8 = Source row 0 509 vld1.32 {q9}, [r0]! @ Q8 = Source row 1 518 vld1.32 {q8}, [r0]! @ Source Row 2 556 vld1.32 {q8}, [r0]! @ Source row 6 573 vmull.s16 q8, d18, d22 @ i=56..5 [all...] |
ih264_deblk_chroma_a9.s | 112 vdup.8 q8, r3 @Q8 contains beta 116 vcge.u8 q12, q12, q8 @Q12= ( ABS(q1 - q0) >= Beta ) 117 vcge.u8 q13, q13, q8 @Q13= ( ABS(p1 - p0) >= Beta ) 192 vaddl.u8 q8, d3, d7 @(p0 + q1) 197 vmlal.u8 q8, d1, d31 @2*p1 + (p0 + q1) 205 vrshrn.i16 d15, q8, #2 @(2*p1 + (p0 + q1) + 2) >> 2 298 vdup.8 q8, r3 @Q8 contains beta 301 vcge.u8 q12, q12, q8 @Q12= ( ABS(q1 - q0) >= Beta [all...] |
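Note: the deblocking snippets carry the H.264 filter gate and one filter equation in register form: filtering is skipped unless |p0-q0| < Alpha, |p1-p0| < Beta and |q1-q0| < Beta (the asm computes the inverted >= tests and masks with them), the Ap < Beta / Aq < Beta results gate the extra luma taps, and the chroma bS=4 path computes (2*p1 + p0 + q1 + 2) >> 2 via VMLAL/VRSHRN. A scalar sketch of the gate and that one equation:

    #include <stdint.h>
    #include <stdlib.h>

    /* Edge-filter gate; the asm evaluates the >= forms and masks them out. */
    static inline int filter_needed(int p1, int p0, int q0, int q1,
                                    int alpha, int beta)
    {
        return abs(p0 - q0) < alpha && abs(p1 - p0) < beta && abs(q1 - q0) < beta;
    }

    /* bS = 4 chroma update for p0: "2*p1 + (p0 + q1) + 2) >> 2" in the snippet. */
    static inline uint8_t filter_p0_bs4_chroma(uint8_t p1, uint8_t p0, uint8_t q1)
    {
        return (uint8_t)((2 * p1 + p0 + q1 + 2) >> 2);
    }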
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm/ |
h264bsdWriteMacroblock.s | 43 qRow8 QN Q8.U8
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/ |
h264bsdWriteMacroblock.S | 45 #define qRow8 Q8
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/ |
omxVCM4P10_PredictIntraChroma_8x8_s.s | 158 qC6 QN Q8.S16 166 qSum6 QN Q8.S16
omxVCM4P10_PredictIntra_16x16_s.s | 131 qLeftDiff QN Q8.S16 134 qDiffLeft QN Q8.S16
/external/libavc/encoder/arm/ |
ime_distortion_metrics_a9q.s | 383 vabdl.u8 q8, d4, d0 395 vabal.u8 q8, d10, d6 404 vabal.u8 q8, d4, d0 412 vabal.u8 q8, d10, d6 418 vadd.u16 q8, q8, q9 @ Q8 : sad_ref2 465 vabdl.u8 q8, d2, d0 477 vabal.u8 q8, d10, d8 491 vabal.u8 q8, d2, d [all...] |
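Note: ime_distortion_metrics accumulates a sum of absolute differences per reference block: vabdl.u8 starts a widened 16-bit accumulator from eight absolute differences, vabal.u8 keeps accumulating into it, and the final vadd.u16 folds partial accumulators together (the comment marks Q8 as sad_ref2). The scalar computation being vectorised, as a sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* Plain SAD over a w x h block; src/ref strides given separately. */
    static uint32_t sad_block(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              int w, int h)
    {
        uint32_t sad = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++)
                sad += (uint32_t)abs((int)src[x] - (int)ref[x]);
            src += src_stride;
            ref += ref_stride;
        }
        return sad;
    }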
/external/llvm/lib/Target/AArch64/ |
AArch64PBQPRegAlloc.cpp | 133 case AArch64::Q8: