    Searched refs: vmull_u8 (Results 1 - 25 of 46)
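
All of the matches below use the ARM NEON intrinsic vmull_u8, declared in
<arm_neon.h> as uint16x8_t vmull_u8(uint8x8_t a, uint8x8_t b). It multiplies
eight unsigned bytes lane by lane and widens each product to 16 bits, so even
a full-range product (255 * 255 = 65025) cannot overflow. A minimal
standalone illustration:

    #include <arm_neon.h>

    /* Widening multiply: each 16-bit output lane holds
     * (uint16_t)a[i] * b[i], so no 8-bit overflow is possible. */
    uint16x8_t widen_mul(uint8x8_t a, uint8x8_t b) {
        return vmull_u8(a, b);
    }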

  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/neon/
bilinearpredict_neon.c 83 q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
84 q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
85 q9u16 = vmull_u8(d6u8, d0u8);
109 q1u16 = vmull_u8(d28u8, d0u8);
110 q2u16 = vmull_u8(d29u8, d0u8);
161 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
162 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
163 q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
164 q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
165 q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8)
    [all...]
sixtappredict_neon.c 97 q3u16 = vmull_u8(d27u8, d0u8);
98 q4u16 = vmull_u8(d28u8, d0u8);
99 q5u16 = vmull_u8(d25u8, d5u8);
100 q6u16 = vmull_u8(d26u8, d5u8);
177 q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
178 q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8);
224 q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8);
225 q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8);
273 q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
274 q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8)
    [all...]
vp8_subpixelvariance_neon.c 73 q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
74 q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
75 q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
76 q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
77 q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
78 q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
79 q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
80 q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
142 q7u16 = vmull_u8(d2u8, d0u8);
143 q8u16 = vmull_u8(d3u8, d0u8)
    [all...]
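
The libvpx matches above are all the widening first step of a two-tap
bilinear filter: vmull_u8 by the first tap, a vmlal_u8 accumulate by the
second, then a rounding narrow by 7 bits (vp8's bilinear taps sum to 128).
A sketch of that pattern; the helper name is illustrative, not libvpx's:

    #include <arm_neon.h>

    /* Two-tap bilinear step as in bilinearpredict_neon.c (sketch). */
    static uint8x8_t bilinear_2tap(uint8x8_t s0, uint8x8_t s1,
                                   uint8x8_t tap0, uint8x8_t tap1) {
        uint16x8_t acc = vmull_u8(s0, tap0);  /* s0 * tap0, widened */
        acc = vmlal_u8(acc, s1, tap1);        /* acc += s1 * tap1   */
        return vqrshrn_n_u16(acc, 7);         /* (acc + 64) >> 7    */
    }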
  /external/libvpx/libvpx/vp8/common/arm/neon/
bilinearpredict_neon.c 80 q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
81 q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
82 q9u16 = vmull_u8(d6u8, d0u8);
106 q1u16 = vmull_u8(d28u8, d0u8);
107 q2u16 = vmull_u8(d29u8, d0u8);
158 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
159 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
160 q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
161 q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
162 q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8)
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
bilinearpredict_neon.c 80 q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
81 q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
82 q9u16 = vmull_u8(d6u8, d0u8);
106 q1u16 = vmull_u8(d28u8, d0u8);
107 q2u16 = vmull_u8(d29u8, d0u8);
158 q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
159 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
160 q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
161 q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
162 q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8)
    [all...]
  /external/libhevc/common/arm/
ihevc_intra_pred_chroma_mode_27_to_33.s 175 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
178 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
186 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
190 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
201 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
204 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
218 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
222 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
236 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
239 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract
    [all...]
ihevc_intra_pred_filters_luma_mode_19_to_25.s 287 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
290 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
297 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
301 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
312 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
315 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
327 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
331 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
344 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
347 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract
    [all...]
ihevc_intra_pred_luma_mode_27_to_33.s 178 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
181 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
189 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
193 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
204 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
207 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
220 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
224 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
238 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
241 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract
    [all...]
ihevc_intra_pred_filters_chroma_mode_19_to_25.s 284 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
287 vmlal.u8 q5,d9,d31 @(i row)vmull_u8(ref_main_idx_1, dup_const_fract)
294 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
298 vmlal.u8 q7,d13,d29 @(ii)vmull_u8(ref_main_idx_1, dup_const_fract)
309 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
312 vmlal.u8 q9,d17,d27 @(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
325 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
329 vmlal.u8 q11,d21,d25 @(iv)vmull_u8(ref_main_idx_1, dup_const_fract)
342 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
345 vmlal.u8 q5,d9,d31 @(v)vmull_u8(ref_main_idx_1, dup_const_fract
    [all...]
ihevc_inter_pred_chroma_horz.s 170 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
206 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
233 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
246 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
293 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
307 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
315 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
345 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
351 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
401 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)
    [all...]
ihevc_inter_pred_chroma_horz_w16out.s 190 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
221 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
249 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
262 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
302 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
316 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
324 vmull.u8 q15,d2,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
347 vmull.u8 q11,d10,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
353 vmull.u8 q10,d11,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
398 vmull.u8 q4,d1,d25 @mul_res = vmull_u8(src[0_3], coeffabs_3)
    [all...]
ihevc_intra_pred_filters_neon_intr.c 749 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup);
753 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup);
829 prod_t1 = vmull_u8(const_nt_1_col_t, pu1_ref_two_nt_1_row_dup);
836 prod_t2 = vmull_u8(const_col_1_t, pu1_ref_three_nt_1_dup);
    [all...]
ihevc_inter_pred_filters_luma_vert.s 158 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
176 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
241 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
267 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
347 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
360 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
433 vmull.u8 q0,d5,d23 @mul_res1 = vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)@
446 vmull.u8 q1,d7,d25 @mul_res2 = vmull_u8(vreinterpret_u8_u32(src_tmp4), coeffabs_3)@
608 vmull.u8 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
626 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)
    [all...]
ihevc_inter_pred_filters_luma_vert_w16inp.s 148 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
164 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
220 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
236 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
305 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
319 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
ihevc_inter_pred_luma_vert_w16inp_w16out.s 158 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
174 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
233 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
250 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
322 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
337 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
ihevc_inter_pred_filters_luma_horz.s 218 vmlal.u8 q4,d3,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
233 vmull.u8 q5,d15,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
298 vmlal.u8 q4,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
343 vmlal.u8 q5,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
397 vmlal.u8 q4,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
ihevc_inter_pred_luma_horz_w16out.s 383 vmlal.u8 q4,d3,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
398 vmull.u8 q5,d15,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
465 vmlal.u8 q4,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
507 vmlal.u8 q5,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
560 vmlal.u8 q4,d6,d27 @mul_res = vmull_u8(src[0_3], coeffabs_3)@
ihevc_inter_pred_chroma_vert.s 143 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
188 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
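
The libhevc assembly above uses vmull.u8/vmlal.u8 pairs for the same
widen-multiply-accumulate idiom, and its comments spell out the intrinsic
equivalent. For the angular intra modes the blend is
ref[i]*(32 - fract) + ref[i+1]*fract, rounded and shifted back by 5. An
intrinsics sketch of that step (names follow the comments; the shipped code
is hand-scheduled assembly):

    #include <arm_neon.h>

    /* HEVC angular intra blend:
     * (ref[i]*(32 - fract) + ref[i+1]*fract + 16) >> 5. Sketch only. */
    static uint8x8_t angular_blend(uint8x8_t ref_main_idx,
                                   uint8x8_t ref_main_idx_1,
                                   uint8_t fract) {
        uint8x8_t dup_const_fract    = vdup_n_u8(fract);
        uint8x8_t dup_const_32_fract = vdup_n_u8(32 - fract);
        uint16x8_t acc = vmull_u8(ref_main_idx, dup_const_32_fract);
        acc = vmlal_u8(acc, ref_main_idx_1, dup_const_fract);
        return vrshrn_n_u16(acc, 5);  /* round, shift 5, narrow */
    }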
  /external/chromium_org/third_party/skia/src/opts/
SkBitmapProcState_filter_neon.h 43 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
44 tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
78 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
79 tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
SkXfermode_opts_arm_neon.cpp 20 tmp = vmull_u8(color, alpha);
32 ret = vmull_u8(color, alpha);
124 vsrc_wide = vmull_u8(vda, vreinterpret_u8_u32(vdup_n_u32(src)));
125 vdst_wide = vmull_u8(visa, vreinterpret_u8_u32(vdup_n_u32(dst)));
154 vsrc_wide = vmull_u8(vida, vreinterpret_u8_u32(vdup_n_u32(src)));
155 vdst_wide = vmull_u8(vsa, vreinterpret_u8_u32(vdup_n_u32(dst)));
188 vsrc_wide = vmull_u8(vsrc, vida);
189 vdst_wide = vmull_u8(vdst, visa);
225 vres_wide = vmull_u8(vsrc, vdst);
415 uint16x8_t scda = vmull_u8(sc, da)
    [all...]
SkBlitRow_opts_arm_neon.cpp 541 vres_a = vmull_u8(vsrc.val[NEON_A], valpha);
542 vres_r = vmull_u8(vsrc.val[NEON_R], valpha);
543 vres_g = vmull_u8(vsrc.val[NEON_G], valpha);
544 vres_b = vmull_u8(vsrc.val[NEON_B], valpha);
    [all...]
  /external/skia/src/opts/
SkBitmapProcState_filter_neon.h 43 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
44 tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
78 tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
79 tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
SkXfermode_opts_arm_neon.cpp 20 tmp = vmull_u8(color, alpha);
32 ret = vmull_u8(color, alpha);
124 vsrc_wide = vmull_u8(vda, vreinterpret_u8_u32(vdup_n_u32(src)));
125 vdst_wide = vmull_u8(visa, vreinterpret_u8_u32(vdup_n_u32(dst)));
154 vsrc_wide = vmull_u8(vida, vreinterpret_u8_u32(vdup_n_u32(src)));
155 vdst_wide = vmull_u8(vsa, vreinterpret_u8_u32(vdup_n_u32(dst)));
188 vsrc_wide = vmull_u8(vsrc, vida);
189 vdst_wide = vmull_u8(vdst, visa);
225 vres_wide = vmull_u8(vsrc, vdst);
415 uint16x8_t scda = vmull_u8(sc, da)
    [all...]
SkBlitRow_opts_arm_neon.cpp 443 vres_a = vmull_u8(vsrc.val[NEON_A], valpha);
444 vres_r = vmull_u8(vsrc.val[NEON_R], valpha);
445 vres_g = vmull_u8(vsrc.val[NEON_G], valpha);
446 vres_b = vmull_u8(vsrc.val[NEON_B], valpha);
    [all...]
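
The Skia matches widen a color channel by an 8-bit alpha with vmull_u8, then
divide by 255 to return to 8 bits. A sketch of that idiom using the exact
rounding trick (x + 128 + ((x + 128) >> 8)) >> 8; this shows the general
technique, not a line-for-line copy of SkXfermode_opts_arm_neon.cpp:

    #include <arm_neon.h>

    /* Scale 8 color lanes by 8 alpha lanes, dividing by 255 exactly. */
    static uint8x8_t scale_by_alpha(uint8x8_t color, uint8x8_t alpha) {
        uint16x8_t wide = vmull_u8(color, alpha);      /* 0..65025      */
        wide = vaddq_u16(wide, vdupq_n_u16(128));      /* rounding bias */
        wide = vaddq_u16(wide, vshrq_n_u16(wide, 8));  /* /255 fixup    */
        return vshrn_n_u16(wide, 8);                   /* >> 8, narrow  */
    }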
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/
vp9_variance_neon.c 122 const uint16x8_t a = vmull_u8(src_0, f0);
146 const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0);
149 const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0);
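
vmull_u8 accepts only 64-bit (8-lane) inputs, which is why the 16-lane rows
in vp9_variance_neon.c are split with vget_low_u8/vget_high_u8 before
multiplying. A sketch of that split (hypothetical helper):

    #include <arm_neon.h>

    /* Apply one filter tap to 16 lanes: vmull_u8 is 8-lane only,
     * so the low and high halves are widened separately. */
    static void widen_mul_16(uint8x16_t src, uint8x8_t f0,
                             uint16x8_t *lo, uint16x8_t *hi) {
        *lo = vmull_u8(vget_low_u8(src), f0);   /* lanes 0..7  */
        *hi = vmull_u8(vget_high_u8(src), f0);  /* lanes 8..15 */
    }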
