/external/libvpx/libvpx/vp8/common/arm/neon/ |
bilinearpredict_neon.c | 84 d0 = vmull_u8(vreinterpret_u8_u32(c0.val[0]), filter0); 85 d1 = vmull_u8(vreinterpret_u8_u32(c1.val[0]), filter0); 86 d2 = vmull_u8(a4, filter0); 105 uint16x8_t b0 = vmull_u8(e0, filter0); 106 uint16x8_t b1 = vmull_u8(e1, filter0); 155 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); 156 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); 157 q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); 158 q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); 159 q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8) [all...] |
sixtappredict_neon.c | 125 c0 = vmull_u8(b0, filter0); 126 c1 = vmull_u8(b2, filter0); 127 c2 = vmull_u8(b5, filter5); 128 c3 = vmull_u8(b7, filter5); 226 d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5); 227 d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5); 260 d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3); 261 d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3); 302 d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5); 303 d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5) [all...] |
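Both vp8 files above share one idiom: pixels and absolute filter taps are 8-bit, so vmull_u8 widens each product into a 16-bit accumulator that cannot overflow, and further taps are folded in with vmlal_u8. A minimal sketch of the two-tap bilinear case, assuming vp8's convention that the taps sum to 128 (VP8_FILTER_SHIFT == 7); the helper name is illustrative:

```c
#include <arm_neon.h>
#include <stdint.h>

static void bilinear_h8(const uint8_t *src, uint8_t *dst,
                        uint8_t f0, uint8_t f1) {
  const uint8x8_t filter0 = vdup_n_u8(f0);
  const uint8x8_t filter1 = vdup_n_u8(f1);
  const uint8x8_t a = vld1_u8(src);      /* pixels x+0..x+7 */
  const uint8x8_t b = vld1_u8(src + 1);  /* pixels x+1..x+8 */
  /* vmull_u8 widens to 16 bits, so 255 * 128 = 32640 cannot overflow. */
  uint16x8_t sum = vmull_u8(a, filter0);
  sum = vmlal_u8(sum, b, filter1);
  /* Round and narrow back to 8 bits: (sum + 64) >> 7. */
  vst1_u8(dst, vrshrn_n_u16(sum, 7));
}
```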
/external/libhevc/common/arm/ |
ihevc_intra_pred_chroma_mode_27_to_33.s | 179 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract) 182 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract) 190 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract) 194 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract) 205 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract) 208 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract) 222 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract) 226 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract) 240 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract) 243 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract [all...] |
ihevc_intra_pred_luma_mode_27_to_33.s | 181 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract) 184 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract) 192 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract) 196 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract) 207 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract) 210 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract) 223 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract) 227 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract) 241 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract) 244 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract [all...] |
ihevc_intra_pred_filters_chroma_mode_19_to_25.s | 288 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract) 291 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract) 298 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract) 302 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract) 313 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract) 316 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract) 329 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract) 333 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract) 346 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract) 349 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract [all...] |
ihevc_intra_pred_filters_luma_mode_19_to_25.s | 291 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract) 294 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract) 301 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract) 305 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract) 316 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract) 319 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract) 331 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract) 335 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract) 348 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract) 351 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract [all...] |
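All four intra-prediction listings above implement the fractional interpolation spelled out in their comments: pred = (ref[idx] * (32 - fract) + ref[idx + 1] * fract + 16) >> 5. A hedged intrinsics sketch of that vmull/vmlal pair (helper name and loads are illustrative):

```c
#include <arm_neon.h>
#include <stdint.h>

/* fract is the sub-pel offset, in [0, 31]. */
static uint8x8_t angular_interp8(const uint8_t *ref_main_idx, uint8_t fract) {
  const uint8x8_t dup_const_fract = vdup_n_u8(fract);
  const uint8x8_t dup_const_32_fract = vdup_n_u8(32 - fract);
  const uint8x8_t ref0 = vld1_u8(ref_main_idx);      /* ref[idx]     */
  const uint8x8_t ref1 = vld1_u8(ref_main_idx + 1);  /* ref[idx + 1] */
  uint16x8_t acc = vmull_u8(ref0, dup_const_32_fract); /* ref[idx] * (32 - fract) */
  acc = vmlal_u8(acc, ref1, dup_const_fract);          /* + ref[idx+1] * fract    */
  return vrshrn_n_u16(acc, 5);                         /* (acc + 16) >> 5         */
}
```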
ihevc_inter_pred_chroma_horz.s | 175 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 212 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 238 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 250 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 297 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 311 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 319 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 349 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 355 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 405 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3) [all...] |
ihevc_inter_pred_chroma_horz_w16out.s | 194 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 225 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 253 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 266 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 306 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 320 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 328 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 351 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 357 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@ 402 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3) [all...] |
ihevc_intra_pred_filters_neon_intr.c | 750 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup); 754 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup); 830 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup); 837 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup); [all...] |
ihevc_inter_pred_filters_luma_vert.s | 164 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 182 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@ 247 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 273 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@ 353 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 366 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@ 440 vmull.u8 q0,d5,d23 @mul_res1 = vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)@ 453 vmull.u8 q1,d7,d25 @mul_res2 = vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3)@ 618 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@ 636 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1) [all...] |
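The vertical luma filter mixes positive and negative taps, which is why these listings pair vmull.u8/vmlal.u8 with vmlsl.u8: coefficients are stored as absolute values, negative taps are subtracted, and the 16-bit accumulator is reinterpreted as signed at the end. A sketch assuming HEVC's half-pel luma filter {-1, 4, -11, 40, 40, -11, 4, -1} (taps sum to 64, so the final shift is 6); names are illustrative:

```c
#include <arm_neon.h>
#include <stdint.h>

static uint8x8_t luma_vert8(const uint8_t *src, int stride) {
  const uint8x8_t c0 = vdup_n_u8(1),  c1 = vdup_n_u8(4);
  const uint8x8_t c2 = vdup_n_u8(11), c3 = vdup_n_u8(40);
  uint16x8_t acc = vmull_u8(vld1_u8(src + 3 * stride), c3); /* +40 */
  acc = vmlal_u8(acc, vld1_u8(src + 4 * stride), c3);       /* +40 */
  acc = vmlal_u8(acc, vld1_u8(src + 1 * stride), c1);       /* +4  */
  acc = vmlal_u8(acc, vld1_u8(src + 6 * stride), c1);       /* +4  */
  acc = vmlsl_u8(acc, vld1_u8(src + 0 * stride), c0);       /* -1  */
  acc = vmlsl_u8(acc, vld1_u8(src + 2 * stride), c2);       /* -11 */
  acc = vmlsl_u8(acc, vld1_u8(src + 5 * stride), c2);       /* -11 */
  acc = vmlsl_u8(acc, vld1_u8(src + 7 * stride), c0);       /* -1  */
  /* The true result is in [-6120, 22440], so it fits in int16 even though
   * the unsigned accumulator may have wrapped; reinterpret, then do a
   * saturating rounded narrow: clamp((acc + 32) >> 6). */
  return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);
}
```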
/external/libhevc/encoder/arm/ |
ihevce_ssd_calculator_neon.c | 74 sqabs_low = vmull_u8(vget_low_u8(abs), vget_low_u8(abs)); 75 sqabs_high = vmull_u8(vget_high_u8(abs), vget_high_u8(abs)); 100 sqabs = vmull_u8(abs, abs); 124 sqabs_low = vmull_u8(vget_low_u8(abs), vget_low_u8(abs)); 125 sqabs_high = vmull_u8(vget_high_u8(abs), vget_high_u8(abs)); 155 sqabs_0 = vmull_u8(vget_low_u8(abs_0), vget_low_u8(abs_0)); 156 sqabs_1 = vmull_u8(vget_high_u8(abs_0), vget_high_u8(abs_0)); 157 sqabs_2 = vmull_u8(vget_low_u8(abs_1), vget_low_u8(abs_1)); 158 sqabs_3 = vmull_u8(vget_high_u8(abs_1), vget_high_u8(abs_1)); 206 sqabs_0 = vmull_u8(vget_low_u8(abs_0), vget_low_u8(abs_0)) [all...] |
ihevce_ssd_and_sad_calculator_neon.c | 71 const uint16x8_t sq_abs_l = vmull_u8(abs_l, abs_l); 72 const uint16x8_t sq_abs_h = vmull_u8(abs_h, abs_h); 194 sqabs_l = vmull_u8(abs_l, abs_l); 195 sqabs_h = vmull_u8(abs_h, abs_h); 205 sqabs_l = vmull_u8(abs_l, abs_l); 206 sqabs_h = vmull_u8(abs_h, abs_h); 257 sqabs_l = vmull_u8(abs_l, abs_l); 258 sqabs_h = vmull_u8(abs_h, abs_h); 268 sqabs_l = vmull_u8(abs_l, abs_l); 269 sqabs_h = vmull_u8(abs_h, abs_h) [all...] |
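Both SSD files lean on the fact that |a - b| fits in 8 bits, so vmull_u8(abs, abs) yields the exact 16-bit square; the squares are then widened pairwise into 32-bit accumulators. A self-contained sketch for an assumed 8x8 block:

```c
#include <arm_neon.h>
#include <stdint.h>

static uint32_t ssd_8x8(const uint8_t *a, int a_stride,
                        const uint8_t *b, int b_stride) {
  uint32x4_t sum = vdupq_n_u32(0);
  for (int i = 0; i < 8; i++) {
    const uint8x8_t abs = vabd_u8(vld1_u8(a), vld1_u8(b)); /* |a - b|       */
    const uint16x8_t sqabs = vmull_u8(abs, abs);           /* exact squares */
    sum = vpadalq_u16(sum, sqabs); /* pairwise widen-and-accumulate to u32  */
    a += a_stride;
    b += b_stride;
  }
  const uint64x2_t sum64 = vpaddlq_u32(sum); /* final horizontal reduction */
  return (uint32_t)(vgetq_lane_u64(sum64, 0) + vgetq_lane_u64(sum64, 1));
}
```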
ihevce_scale_by_2_neon.c | 89 p = vreinterpretq_s16_u16(vmull_u8(c, wt_0)); // a[0] * 66 162 p = vreinterpretq_s16_u16(vmull_u8(vget_low_u8(src[c]), wt_0)); 180 p = vreinterpretq_s16_u16(vmull_u8(vget_high_u8(src[c]), wt_0));
|
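A note on the vreinterpretq_s16_u16 cast in ihevce_scale_by_2_neon.c: with the weight 66 shown in the comment, vmull_u8 yields at most 255 * 66 = 16830 < 32768, so the unsigned product reinterprets losslessly as signed; presumably this lets later (possibly negative) taps be applied with signed multiply-accumulates. A minimal sketch of the cast alone:

```c
#include <arm_neon.h>
#include <stdint.h>

static int16x8_t weighted_row(const uint8_t *src) {
  const uint8x8_t wt_0 = vdup_n_u8(66);
  const uint8x8_t c = vld1_u8(src);
  return vreinterpretq_s16_u16(vmull_u8(c, wt_0)); /* a[0] * 66, as s16 */
}
```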
/external/skia/src/opts/ |
Sk4px_NEON.h | 16 return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)), 17 vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
|
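The Sk4px idiom (identical in the skqp copy below): NEON has no single widening multiply for a full 16-lane vector, so the operands are split with vget_low_u8/vget_high_u8 and each half is widened separately. A sketch with an illustrative return type standing in for Sk16h:

```c
#include <arm_neon.h>

typedef struct { uint16x8_t lo, hi; } u16x16;

static u16x16 mul_widen(uint8x16_t a, uint8x16_t b) {
  u16x16 r;
  r.lo = vmull_u8(vget_low_u8(a),  vget_low_u8(b));   /* lanes 0..7  */
  r.hi = vmull_u8(vget_high_u8(a), vget_high_u8(b));  /* lanes 8..15 */
  return r;
}
```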
/external/skqp/src/opts/ |
Sk4px_NEON.h | 16 return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)), 17 vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
|
/external/libaom/libaom/av1/common/arm/ |
blend_a64_hmask_neon.c | 51 res_low = vmull_u8(vget_low_u8(m_q), vget_low_u8(tmp0_q)); 54 res_high = vmull_u8(vget_high_u8(m_q), vget_high_u8(tmp0_q)); 78 res = vmull_u8(m, tmp0); 97 res = vmull_u8(m, tmp0); 121 res = vmull_u8(m, tmp0);
|
blend_a64_vmask_neon.c | 49 res_low = vmull_u8(m, vget_low_u8(tmp0_q)); 51 res_high = vmull_u8(m, vget_high_u8(tmp0_q)); 72 res = vmull_u8(m, tmp0); 96 res = vmull_u8(m, tmp0); 128 res = vmull_u8(m, tmp0);
|
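Both blend files compute the same weighted average, dst = (m * src0 + (64 - m) * src1 + 32) >> 6, with mask values in [0, 64]. A sketch of the per-8-pixel step (helper name is illustrative):

```c
#include <arm_neon.h>
#include <stdint.h>

/* Assumes every lane of m is <= 64. */
static uint8x8_t blend_a64_8(uint8x8_t m, uint8x8_t src0, uint8x8_t src1) {
  const uint8x8_t m_inv = vsub_u8(vdup_n_u8(64), m);
  uint16x8_t res = vmull_u8(m, src0);    /* m * src0            */
  res = vmlal_u8(res, m_inv, src1);      /* + (64 - m) * src1   */
  return vrshrn_n_u16(res, 6);           /* (res + 32) >> 6     */
}
```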
/external/libaom/libaom/aom_dsp/arm/ |
subpel_variance_neon.c | 36 const uint16x8_t a = vmull_u8(src_0, f0); 60 const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0); 63 const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0);
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
subpel_variance_neon.c | 39 const uint16x8_t a = vmull_u8(src_0, f0); 61 const uint16x8_t a = vmull_u8(src_0, f0); 85 const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0); 88 const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0);
|
vpx_convolve8_avg_vert_filter_type1_neon.asm | 82 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 107 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 179 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 208 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 281 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 304 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 399 ; vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1); 418 ; vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3);
|
vpx_convolve8_avg_vert_filter_type2_neon.asm | 83 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 108 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 180 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 209 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 281 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 304 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 400 ; vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1); 419 ; vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3);
|
vpx_convolve8_vert_filter_type1_neon.asm | 83 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 108 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 176 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 202 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 268 vmlsl.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 289 vmlsl.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 376 ; vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1); 396 ; vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3);
|
vpx_convolve8_vert_filter_type2_neon.asm | 83 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 108 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 177 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 203 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 269 vmlal.u8 q4, d1, d23 ;mul_res1 = vmull_u8(src_tmp2, 289 vmlal.u8 q5, d2, d23 ;mul_res2 = vmull_u8(src_tmp3, 375 ; vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1); 394 ; vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3);
|
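In these four ports, filter_type1 subtracts its outer taps with vmlsl.u8 while filter_type2 adds them with vmlal.u8, mirroring the mixed-sign HEVC sketch earlier; the _avg_ variants additionally average the filtered result with the pixels already in dst before storing. A sketch of that final store, assuming VP9's taps sum to 128 (FILTER_BITS == 7):

```c
#include <arm_neon.h>
#include <stdint.h>

static void store_avg8(uint8_t *dst, uint16x8_t mul_res) {
  /* Saturating rounded narrow of the signed accumulator: (mul_res + 64) >> 7. */
  const uint8x8_t filtered = vqrshrun_n_s16(vreinterpretq_s16_u16(mul_res), 7);
  /* Rounding halving add averages with the existing destination pixels. */
  vst1_u8(dst, vrhadd_u8(filtered, vld1_u8(dst)));
}
```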
/external/webp/src/dsp/ |
alpha_processing_neon.c | 27 const uint16x8_t r1 = vmull_u8((V).val[1], alpha); \ 28 const uint16x8_t g1 = vmull_u8((V).val[2], alpha); \ 29 const uint16x8_t b1 = vmull_u8((V).val[(OTHER)], alpha); \
|
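The WebP macro widens each channel by the alpha lane with vmull_u8; the product must then be divided by 255 to premultiply. The divide-by-255 rounding idiom below is a standard exact one and an assumption here; the library's own narrowing step may differ:

```c
#include <arm_neon.h>
#include <stdint.h>

static uint8x8_t premultiply(uint8x8_t channel, uint8x8_t alpha) {
  uint16x8_t p = vmull_u8(channel, alpha);  /* channel * alpha, <= 65025  */
  p = vaddq_u16(p, vdupq_n_u16(128));       /* + 128 for rounding         */
  p = vaddq_u16(p, vshrq_n_u16(p, 8));      /* + (p >> 8): corrects /256  */
  return vshrn_n_u16(p, 8);                 /* exact round(c * a / 255)   */
}
```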
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
vp9_frame_scale_neon.c | 78 const uint16x8_t h0 = vmull_u8(vget_low_u8(in0), coef0); 79 const uint16x8_t h1 = vmull_u8(vget_high_u8(in0), coef0); 80 const uint16x8_t h2 = vmull_u8(vget_low_u8(in2), coef0); 81 const uint16x8_t h3 = vmull_u8(vget_high_u8(in2), coef0); 91 const uint16x8_t v0 = vmull_u8(hor0, coef0); 92 const uint16x8_t v1 = vmull_u8(hor1, coef0); 177 const uint16x8_t h0 = vmull_u8(s[0], coef[0]);
|
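The frame scaler runs the same two-tap blend twice: a horizontal pass (h0..h3) produces narrowed intermediate rows, and a vertical pass (v0, v1) blends those. A sketch assuming coef0 + coef1 == 128; names and layout are illustrative:

```c
#include <arm_neon.h>
#include <stdint.h>

static uint8x8_t scale_step(uint8x8_t row0_even, uint8x8_t row0_odd,
                            uint8x8_t row1_even, uint8x8_t row1_odd,
                            uint8x8_t coef0, uint8x8_t coef1) {
  /* Horizontal: blend adjacent columns of each source row. */
  uint16x8_t h0 = vmull_u8(row0_even, coef0);
  h0 = vmlal_u8(h0, row0_odd, coef1);
  uint16x8_t h1 = vmull_u8(row1_even, coef0);
  h1 = vmlal_u8(h1, row1_odd, coef1);
  const uint8x8_t hor0 = vrshrn_n_u16(h0, 7);  /* narrow between passes */
  const uint8x8_t hor1 = vrshrn_n_u16(h1, 7);
  /* Vertical: blend the two intermediate rows the same way. */
  uint16x8_t v = vmull_u8(hor0, coef0);
  v = vmlal_u8(v, hor1, coef1);
  return vrshrn_n_u16(v, 7);
}
```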