/external/libhevc/decoder/arm/ |
ihevcd_fmt_conv_420sp_to_rgba8888.s |
    200 VMULL.S16 Q7,D5,D0[1] @//(U-128)*C2 FOR G
    201 VMLAL.S16 Q7,D7,D0[2] @//Q7 = (U-128)*C2 + (V-128)*C3
    215 VQSHRN.S32 D13,Q7,#13 @//D13 = [(U-128)*C2 + (V-128)*C3]>>13 4 16-BIT VALUES
    218 VADDW.U8 Q7,Q4,D30 @//Q7 - HAS Y + B
    226 VQMOVUN.S16 D14,Q7
    233 VZIP.16 Q7,Q8
    245 VZIP.32 Q7,Q10
    259 VADDW.U8 Q7,Q4,D28 @//Q7 - HAS Y + [all...]
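These matches are the chroma legs of a fixed-point YUV-to-RGB conversion: the green term is accumulated as (U-128)*C2 + (V-128)*C3 in 32-bit lanes, narrowed with a saturating >>13, widened-added to Y, saturated back to bytes with VQMOVUN, and interleaved into RGBA order by the VZIP pair. A scalar sketch of the per-pixel arithmetic; the coefficients below are illustrative BT.601-style values, not the table the routine actually loads into D0:

    #include <stdint.h>

    static uint8_t clamp8(int32_t v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* One pixel of YUV -> RGB in Q13 fixed point (coefficients are
     * placeholders, not the decoder's table). */
    static void yuv_to_rgb_q13(uint8_t y, uint8_t u, uint8_t v,
                               uint8_t *r, uint8_t *g, uint8_t *b)
    {
        int32_t cu = u - 128, cv = v - 128;
        int32_t rc = (11485 * cv) >> 13;               /* (V-128)*C1              */
        int32_t gc = (-2819 * cu - 5850 * cv) >> 13;   /* (U-128)*C2 + (V-128)*C3 */
        int32_t bc = (14516 * cu) >> 13;               /* (U-128)*C4              */
        *r = clamp8(y + rc);   /* VADDW.U8 then VQMOVUN.S16 in the NEON code */
        *g = clamp8(y + gc);
        *b = clamp8(y + bc);
    }
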
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
Filt_6k_7k_neon.s |
    66 VLD1.S16 {Q6, Q7}, [r7]! @ signal[48] ~ signal[63]
    76 VSHR.S16 Q11, Q7, #2
    96 VLD1.S16 {Q6, Q7}, [r4]! @x[16] ~ X[31]
    116 VEXT.8 Q6,Q6,Q7,#2
    121 VEXT.8 Q7,Q7,Q8,#2
    138 VEXT.8 Q6,Q6,Q7,#2
    143 VEXT.8 Q7,Q7,Q8,#2
    160 VEXT.8 Q6,Q6,Q7,# [all...]
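Filt_6k_7k is a band-pass FIR filter; the VSHR.S16 #2 pre-scales the input for headroom, and each VEXT.8 ...,#2 extracts the signal window advanced by one 16-bit sample, so a single pair of loads feeds every x[n-j] term of the multiply-accumulate. A scalar sketch of that convolution; L_FIR and the Q15 coefficient format are assumptions, the real table ships with the codec:

    #include <stdint.h>

    #define L_FIR 31   /* assumed filter length */

    /* y[n] = sum_j h[j] * x[n-j]; x must carry L_FIR-1 history samples
     * before x[0], and is assumed pre-shifted right by 2 for headroom. */
    static void filt_6k_7k_ref(const int16_t *x, int16_t *y, int n,
                               const int16_t h[L_FIR])
    {
        for (int i = 0; i < n; i++) {
            int32_t acc = 0;
            for (int j = 0; j < L_FIR; j++)
                acc += (int32_t)h[j] * x[i - j];
            y[i] = (int16_t)(acc >> 15);   /* Q15 coefficients assumed */
        }
    }
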
syn_filt_neon.s |
    82 VDUP.S32 Q7, D10[0]
    84 VSUB.S32 Q9, Q10, Q7
|
Dot_p_neon.s |
    42 VLD1.S16 {Q6, Q7}, [r0]! @load 16 Word16 x[]
    79 VLD1.S16 {Q6, Q7}, [r0]!
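Dot_p_neon.s streams sixteen Word16 values of x[] at a time into {Q6,Q7} and multiply-accumulates into 32-bit lanes. The scalar loop being replaced is essentially the following; AMR-WB's Dot_product12 additionally doubles the sum and normalizes the result, which this sketch omits:

    #include <stdint.h>

    /* 16-bit dot product with 32-bit accumulation (overflow-unchecked sketch). */
    static int32_t dot16_ref(const int16_t *x, const int16_t *y, int n)
    {
        int32_t acc = 0;
        for (int i = 0; i < n; i++)
            acc += (int32_t)x[i] * y[i];   /* VMLAL.S16, 16 lanes per load pair */
        return acc;
    }
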
|
Norm_Corr_neon.s |
    76 VLD1.S16 {Q6, Q7}, [r14]!
    119 VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
    140 VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
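Norm_Corr_neon.s reuses the same 16-lane load pattern to accumulate the two sums of a normalized correlation used in pitch search. In floating point the quantity is as below; the production code does this in fixed point with explicit scaling:

    #include <math.h>
    #include <stdint.h>

    /* corr(x,y) / sqrt(energy(y)); float sketch of the accumulated terms. */
    static double norm_corr_ref(const int16_t *x, const int16_t *y, int n)
    {
        double xy = 0.0, yy = 0.0;
        for (int i = 0; i < n; i++) {
            xy += (double)x[i] * y[i];
            yy += (double)y[i] * y[i];
        }
        return xy / sqrt(yy + 1.0);   /* +1.0 guards the all-zero case */
    }
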
|
pred_lt4_1_neon.s |
    57 VLD1.S16 {Q6, Q7}, [r4]! @load 16 x[]
|
scale_sig_neon.s |
    70 VLD1.S16 {Q6, Q7}, [r5]! @load 16 Word16 x[]
|
/external/libhevc/common/arm/ |
ihevc_sao_edge_offset_class0.s |
    153 VEXT.8 Q7,Q7,Q6,#15 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
    157 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    162 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    177 VEXT.8 Q7,Q6,Q7,#1 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
    185 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    188 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    195 VADD.I8 Q7,Q1,Q10 @edge_idx = vaddq_s8(const_2, sign_left)
    198 VADD.I8 Q7,Q7,Q11 @edge_idx = vaddq_s8(edge_idx, sign_right [all...]
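The intrinsic-style comments here spell out the SAO edge-offset kernel: shift the row by one pixel in each direction (the VEXT pair), reduce the VCGT/VCLT masks to signs in {-1, 0, 1}, and pick an offset with edge_idx = 2 + sign_left + sign_right. A scalar rendering of class 0 (horizontal neighbours), for interior pixels at 8-bit depth:

    #include <stdint.h>

    static int sign3(int a, int b) { return (a > b) - (a < b); } /* VCGT - VCLT */
    static uint8_t clip1(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* SAO edge offset, class 0: neighbours are src[x-1] and src[x+1]. */
    static void sao_eo_class0_ref(const uint8_t *src, uint8_t *dst, int w,
                                  const int8_t offset[5])
    {
        for (int x = 1; x < w - 1; x++) {
            int edge_idx = 2 + sign3(src[x], src[x - 1])
                             + sign3(src[x], src[x + 1]);
            dst[x] = clip1(src[x] + offset[edge_idx]);
        }
    }
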
ihevc_sao_edge_offset_class0_chroma.s |
    159 VEXT.8 Q7,Q7,Q6,#14 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
    163 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    167 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    187 VEXT.8 Q7,Q6,Q7,#2 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
    191 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    194 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    200 VADD.U8 Q7,Q1,Q10 @edge_idx = vaddq_s8(const_2, sign_left)
    202 VADD.U8 Q7,Q7,Q11 @edge_idx = vaddq_s8(edge_idx, sign_right [all...]
ihevc_sao_edge_offset_class2.s |
    248 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
    256 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    294 VADD.I8 Q12,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up)
    305 VNEG.S8 Q7,Q5 @I sign_up = vnegq_s8(sign_down)
    307 VEXT.8 Q7,Q7,Q7,#15 @I sign_up = vextq_s8(sign_up, sign_up, 15)
    378 VADD.I8 Q11,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up)
    383 VNEG.S8 Q7,Q12 @II sign_up = vnegq_s8(sign_down [all...]
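Classes 2 and 3 run the same classification along diagonals, and the VNEG.S8 + VEXT.8 pair is the key reuse trick: the sign of (current, down-right) computed for row N is, negated and shifted one column, exactly the (current, up-left) sign needed for row N+1, so each pixel-pair comparison happens once. A scalar sketch under the assumption of 8-bit pixels, interior positions only, and a bounded row width; boundary pixels are left to the caller:

    #include <stdint.h>
    #include <string.h>

    /* sign3() and clip1() as in the class-0 sketch above. */
    static int sign3(int a, int b) { return (a > b) - (a < b); }
    static uint8_t clip1(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    #define MAX_W 1024   /* assumed row-width bound for the sign buffers */

    /* SAO edge offset, class 2 (135-degree diagonal).  sign_up[] for row
     * y+1 is the negated sign_down of row y shifted one column right
     * (the VNEG.S8 + VEXT.8 pair in the assembly). */
    static void sao_eo_class2_ref(const uint8_t *src, uint8_t *dst,
                                  int w, int h, int stride,
                                  const int8_t offset[5])
    {
        int8_t sign_up[MAX_W], next_up[MAX_W];
        for (int x = 1; x < w - 1; x++)           /* seed row 1 against row 0 */
            sign_up[x] = (int8_t)sign3(src[stride + x], src[x - 1]);
        for (int y = 1; y < h - 1; y++) {
            const uint8_t *cur = src + y * stride;
            for (int x = 1; x < w - 1; x++) {
                int sign_down = sign3(cur[x], cur[stride + x + 1]); /* down-right */
                int edge_idx  = 2 + sign_up[x] + sign_down;
                dst[y * stride + x] = clip1(cur[x] + offset[edge_idx]);
                next_up[x + 1] = (int8_t)(-sign_down);     /* reuse for row y+1 */
            }
            next_up[1] = (int8_t)sign3(cur[stride + 1], cur[0]);   /* left edge */
            memcpy(sign_up, next_up, sizeof sign_up);
        }
    }
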
ihevc_sao_edge_offset_class2_chroma.s |
    340 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
    356 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    407 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up)
    411 VNEG.S8 Q7,Q11 @I sign_up = vnegq_s8(sign_down)
    414 VEXT.8 Q7,Q7,Q7,#14 @I sign_up = vextq_s8(sign_up, sign_up, 14)
    500 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up)
    510 VNEG.S8 Q7,Q12 @II sign_up = vnegq_s8(sign_down [all...]
ihevc_sao_edge_offset_class3.s |
    263 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
    276 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    312 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up)
    315 VNEG.S8 Q7,Q5 @I sign_up = vnegq_s8(sign_down)
    317 VEXT.8 Q7,Q7,Q7,#1 @I sign_up = vextq_s8(sign_up, sign_up, 1)
    401 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up)
    403 VNEG.S8 Q7,Q12 @II sign_up = vnegq_s8(sign_down [all...]
ihevc_sao_edge_offset_class3_chroma.s |
    330 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
    338 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    399 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up)
    402 VNEG.S8 Q7,Q11 @I sign_up = vnegq_s8(sign_down)
    405 VEXT.8 Q7,Q7,Q7,#2 @I sign_up = vextq_s8(sign_up, sign_up, 2)
    503 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up)
    510 VNEG.S8 Q7,Q12 @II sign_up = vnegq_s8(sign_down [all...]
ihevc_sao_edge_offset_class1.s |
    135 VCLT.U8 Q7,Q5,Q4 @vcltq_u8(pu1_cur_row, pu1_top_row)
    137 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    152 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
    155 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    227 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
    228 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    278 VCLT.U8 Q7,Q5,Q4 @vcltq_u8(pu1_cur_row, pu1_top_row)
    279 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    294 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_next_row)
    297 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt) [all...]
ihevc_sao_edge_offset_class1_chroma.s |
    140 VCLT.U8 Q7,Q5,Q14 @vcltq_u8(pu1_cur_row, pu1_top_row)
    142 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    157 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
    160 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    240 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
    241 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    296 VCLT.U8 Q7,Q5,Q14 @vcltq_u8(pu1_cur_row, pu1_top_row)
    297 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    312 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_next_row)
    315 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt) [all...]
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV7/ |
Radix4FFT_v7.s |
    97 VQDMULH.S32 Q11, Q7, Q15 @ MULHIGH(sinx, t1)
    98 VQDMULH.S32 Q12, Q7, Q14 @ MULHIGH(cosx, t1)
    102 VSUB.S32 Q7, Q12, Q13 @ MULHIGH(cosx, t1) - MULHIGH(sinx, t0)
    105 VSUB.S32 Q5, Q7, Q9 @ r5 = r7 - t1@
    107 VADD.S32 Q7, Q7, Q9 @ r7 = r7 + t1@
    115 VSUB.S32 Q11, Q3, Q7 @ xptr[1] = r3 - r7@
    125 VADD.S32 Q11, Q3, Q7 @ xptr[1] = r3 + r7@
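VQDMULH.S32 produces the saturated high half of 2*a*b, i.e. a Q31 fractional multiply; that is how the MULHIGH(cosx, t) terms in the comments are formed, and the butterfly then combines them as a complex rotation by the twiddle factor. A sketch of the building block; mulhigh below is the plain >>32 high half, with the extra doubling of VQDMULH assumed to be absorbed in how the twiddle table is scaled:

    #include <stdint.h>

    /* High 32 bits of a 32x32-bit signed multiply. */
    static int32_t mulhigh(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    /* Rotate (t0, t1) by the twiddle (cosx, sinx), matching lines 97-102:
     * the second leg is MULHIGH(cosx, t1) - MULHIGH(sinx, t0).  Sign
     * conventions depend on FFT direction; taken here as an assumption. */
    static void cmul_twiddle(int32_t t0, int32_t t1, int32_t cosx, int32_t sinx,
                             int32_t *out_re, int32_t *out_im)
    {
        *out_re = mulhigh(cosx, t0) + mulhigh(sinx, t1);
        *out_im = mulhigh(cosx, t1) - mulhigh(sinx, t0);
    }
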
|
PrePostMDCT_v7.s |
    47 VREV64.32 Q8, Q7
    110 VREV64.32 Q9, Q7
|
/external/llvm/lib/Target/AArch64/ |
AArch64CallingConvention.h |
    42 AArch64::Q6, AArch64::Q7};
|
/bionic/libm/upstream-freebsd/lib/msun/bsdsrc/ |
b_tgamma.c |
    109 #define Q7 9.35021023573788935372153030556e-05
    254 q = Q0 +z*(Q1+z*(Q2+z*(Q3+z*(Q4+z*(Q5+z*(Q6+z*(Q7+z*Q8)))))));
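Despite the register-like names, Q0..Q8 here are coefficients of the denominator polynomial in tgamma's rational approximation, and line 254 is Horner's rule: nesting the multiplies evaluates a degree-8 polynomial in eight multiply-adds with no explicit powers of z. The same scheme in isolation:

    /* Horner evaluation: c[0] + z*(c[1] + z*(... + z*c[n-1])). */
    static double horner(const double c[], int n, double z)
    {
        double p = c[n - 1];
        for (int i = n - 2; i >= 0; i--)
            p = c[i] + z * p;
        return p;
    }
    /* q = horner((const double[]){Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8}, 9, z)
     * reproduces the nested expression on line 254. */
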
|
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/ |
omxVCM4P10_PredictIntraChroma_8x8_s.s |
    135 qLeft7minus0 QN Q7.S16
    157 qC5 QN Q7.S16
    165 qSum5 QN Q7.S16
|
omxVCM4P10_PredictIntra_16x16_s.s |
    91 qC QN Q7.S16
    147 qLeft15minus0 QN Q7.S16
|
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm/ |
h264bsdWriteMacroblock.s |
    42 qRow7 QN Q7.U8
    91 VPUSH {q4-q7}
    148 VPOP {q4-q7}
|
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/ |
h264bsdWriteMacroblock.S |
    44 #define qRow7 Q7
    93 VPUSH {q4-q7}
    150 VPOP {q4-q7}
|
/external/libavc/common/arm/ |
ih264_deblk_luma_a9.s |
    117 vmovl.u16 q7, d14 @
    121 vsli.32 q7, q7, #8 @
    127 vsli.32 q7, q7, #16 @Q7 = C0
    139 vsub.i8 q9, q7, q10 @Q9 = C0 + (Ap < Beta)
    163 vneg.s8 q13, q7 @Q13 = -C0
    164 vmin.s8 q14, q14, q7 @Q14 = min(C0,i_macro_p1)
    171 vmin.s8 q15, q15, q7 @Q15 = min(C0,i_macro_q1 [all...]
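The vneg.s8/vmin.s8 pattern implements the symmetric clamp at the heart of the H.264 deblocking filter: a computed delta is limited to [-C0, C0], where C0 is the tc-derived bound that the vsli pair has replicated across all lanes. A scalar form of the clamp and its use on the edge pixels; variable names are illustrative:

    #include <stdint.h>

    /* Clamp d to [-c0, c0]: vmax.s8(vmin.s8(d, c0), -c0) in the NEON code. */
    static int clip3_sym(int d, int c0)
    {
        return d > c0 ? c0 : (d < -c0 ? -c0 : d);
    }

    /* Filtered edge samples, BS < 4 branch (illustrative): p0' = p0 + delta,
     * q0' = q0 - delta, with delta = clip3_sym(raw_delta, c0). */
    static void filter_p0_q0(uint8_t *p0, uint8_t *q0, int raw_delta, int c0)
    {
        int delta = clip3_sym(raw_delta, c0);
        int np = *p0 + delta, nq = *q0 - delta;
        *p0 = (uint8_t)(np < 0 ? 0 : np > 255 ? 255 : np);
        *q0 = (uint8_t)(nq < 0 ? 0 : nq > 255 ? 255 : nq);
    }
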
ih264_iquant_itrans_recon_a9.s |
    206 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined
    207 vadd.s16 q10, q7, q8 @x0 + x3 and x1+x2 combined
    381 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined
    383 vadd.s16 q10, q7, q8 @x0 + x3 and x1+x2 combined
    536 vmull.s16 q7, d19, d23 @ p[i] = (x[i] * trns_coeff[i]) where i=28..31
    546 vshl.s32 q7, q7, q15 @
    553 vqrshrn.s32 d7, q7, #0x6 @ D7 = c[i] = ((q[i] + 32) >> 6) where i = 28..31
    557 vmull.s16 q7, d19, d23 @
    564 vshl.s32 q7, q7, q15 [all...]
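Two stages of the inverse-quant/inverse-transform pipeline appear in these matches: the dequantization p[i] = x[i]*coeff[i], shifted by the QP-derived amount and rounded back to 16 bits with (q[i] + 32) >> 6 (the vqrshrn #6), and the 4-point butterfly whose x0+/-x3 and x1+/-x2 sums the vadd/vsub pair computes two rows at a time. A scalar sketch of the butterfly, following the standard H.264 inverse core transform:

    #include <stdint.h>

    /* One 4-point stage of the H.264 inverse core transform. */
    static void itrans4_stage(const int16_t in[4], int16_t out[4])
    {
        int x0 = in[0] + in[2];            /* even part */
        int x1 = in[0] - in[2];
        int x2 = (in[1] >> 1) - in[3];     /* odd part, half-shifted terms */
        int x3 = in[1] + (in[3] >> 1);
        out[0] = (int16_t)(x0 + x3);       /* the vadd: x0+x3, x1+x2 */
        out[1] = (int16_t)(x1 + x2);
        out[2] = (int16_t)(x1 - x2);       /* the vsub: x1-x2, x0-x3 */
        out[3] = (int16_t)(x0 - x3);
    }
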