/external/libvpx/libvpx/vp8/common/arm/neon/ |
vp8_loopfilter_neon.c | 215 q3 = vcombine_u8(d6, d7); 216 q4 = vcombine_u8(d8, d9); 217 q5 = vcombine_u8(d10, d11); 218 q6 = vcombine_u8(d12, d13); 219 q7 = vcombine_u8(d14, d15); 220 q8 = vcombine_u8(d16, d17); 221 q9 = vcombine_u8(d18, d19); 222 q10 = vcombine_u8(d20, d21); 366 q3 = vcombine_u8(d6, d7); 367 q4 = vcombine_u8(d8, d9) [all...] |
bilinearpredict_neon.c | 66 a01 = vcombine_u8(a0, a1); 67 a23 = vcombine_u8(a2, a3); 97 store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1)); 115 store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1)); 439 q1u8 = vcombine_u8(d2u8, d3u8); 440 q2u8 = vcombine_u8(d4u8, d5u8); 441 q3u8 = vcombine_u8(d6u8, d7u8); 442 q4u8 = vcombine_u8(d8u8, d9u8); 518 q7u8 = vcombine_u8(d14u8, d15u8); 519 q8u8 = vcombine_u8(d16u8, d17u8) [all...] |
mbloopfilter_neon.c | 248 q3 = vcombine_u8(d6, d7); 249 q4 = vcombine_u8(d8, d9); 250 q5 = vcombine_u8(d10, d11); 251 q6 = vcombine_u8(d12, d13); 252 q7 = vcombine_u8(d14, d15); 253 q8 = vcombine_u8(d16, d17); 254 q9 = vcombine_u8(d18, d19); 255 q10 = vcombine_u8(d20, d21); 337 q3 = vcombine_u8(d6, d7); 338 q4 = vcombine_u8(d8, d9) [all...] |
loopfiltersimpleverticaledge_neon.c | 194 q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10 195 q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12 196 q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11 197 q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13 |
sixtappredict_neon.c | 155 store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1)); 220 s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1)); 221 s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3)); 272 store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2)); 294 s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1)); 295 s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3)); 386 store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1)); [all...] |
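All of the vp8 filter kernels above share one idiom: pairs of 64-bit d registers (d6/d7, d8/d9, ...) are widened into 128-bit q registers so the filter arithmetic runs on sixteen pixels per instruction. A minimal sketch of that step, using a helper of our own naming (not libvpx's):

    #include <arm_neon.h>

    /* Illustrative helper: load two consecutive 8-byte rows and widen them
     * into one 128-bit register, mirroring q3 = vcombine_u8(d6, d7) above. */
    static uint8x16_t load_two_rows(const uint8_t *src, int stride) {
      const uint8x8_t row0 = vld1_u8(src);          /* becomes lanes 0-7  */
      const uint8x8_t row1 = vld1_u8(src + stride); /* becomes lanes 8-15 */
      return vcombine_u8(row0, row1);
    }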
/external/libvpx/libvpx/vpx_dsp/arm/ |
avg_neon.c | 206 const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride)); 208 vcombine_u8(vld1_u8(a + 2 * a_stride), vld1_u8(a + 3 * a_stride)); 210 vcombine_u8(vld1_u8(a + 4 * a_stride), vld1_u8(a + 5 * a_stride)); 212 vcombine_u8(vld1_u8(a + 6 * a_stride), vld1_u8(a + 7 * a_stride)); 214 const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride)); 216 vcombine_u8(vld1_u8(b + 2 * b_stride), vld1_u8(b + 3 * b_stride)); 218 vcombine_u8(vld1_u8(b + 4 * b_stride), vld1_u8(b + 5 * b_stride)); 220 vcombine_u8(vld1_u8(b + 6 * b_stride), vld1_u8(b + 7 * b_stride)); |
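avg_neon.c packs two 8-pixel rows per q register, so an 8x8 block fits in four vectors. One payoff is that a single vabdq_u8 then yields sixteen absolute differences at once; the reduction below is a sketch under that assumption, not the library's actual code:

    #include <arm_neon.h>

    /* Sketch: SAD of two 8x2 strips using the row-pairing from avg_neon.c. */
    static unsigned sad_8x2(const uint8_t *a, int a_stride,
                            const uint8_t *b, int b_stride) {
      const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));
      const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride));
      const uint8x16_t diff = vabdq_u8(a01, b01); /* |a - b| per lane */
      /* Pairwise widen 16 x u8 -> 8 x u16 -> 4 x u32 -> 2 x u64, then sum. */
      const uint64x2_t sum = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(diff)));
      return (unsigned)(vgetq_lane_u64(sum, 0) + vgetq_lane_u64(sum, 1));
    }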
vpx_convolve_avg_neon.c | 58 s01 = vcombine_u8(s0, s1); 59 d01 = vcombine_u8(d0, d1); |
vpx_convolve8_neon.c | 472 vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23))); 560 vrhaddq_u8(vreinterpretq_u8_u32(d0415), vcombine_u8(t0, t1))); 562 vrhaddq_u8(vreinterpretq_u8_u32(d2637), vcombine_u8(t2, t3))); 657 d01 = vcombine_u8(vld1_u8(d + 0 * dst_stride), 659 d23 = vcombine_u8(vld1_u8(d + 2 * dst_stride), 661 d45 = vcombine_u8(vld1_u8(d + 4 * dst_stride), 663 d67 = vcombine_u8(vld1_u8(d + 6 * dst_stride), 665 d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1)); 666 d23 = vrhaddq_u8(d23, vcombine_u8(t2, t3)); 667 d45 = vrhaddq_u8(d45, vcombine_u8(t4, t5)) [all...] |
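The vrhaddq_u8 lines are the "_avg" convolution variants: freshly filtered pixels are averaged into the destination with rounding, new = (dst + src + 1) >> 1 per lane. A sketch over one pair of rows (helper name is ours):

    #include <arm_neon.h>

    /* Sketch: average two filtered 8-byte rows (t0, t1) into dst. */
    static void avg_two_rows(uint8_t *dst, int dst_stride,
                             uint8x8_t t0, uint8x8_t t1) {
      uint8x16_t d01 = vcombine_u8(vld1_u8(dst), vld1_u8(dst + dst_stride));
      d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1)); /* rounding halving add */
      vst1_u8(dst, vget_low_u8(d01));
      vst1_u8(dst + dst_stride, vget_high_u8(d01));
    }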
subpel_variance_neon.c | 69 vst1q_u8(&output_ptr[j], vcombine_u8(out_lo, out_hi)); |
intrapred_neon.c | 95 const uint8x16_t above_and_left = vcombine_u8(above_u8, left_u8); 420 const uint8x16_t L76543210XA0123456 = vcombine_u8(L76543210, XA0123456); 421 const uint8x16_t L6543210XA01234567 = vcombine_u8(L6543210X, A01234567); 422 const uint8x16_t L543210XA01234567_ = vcombine_u8(L543210XA0, A1234567_); 483 const uint8x16_t Lfedcba9876543210 = vcombine_u8(Lfedcba98, L76543210); 499 const uint8x16_t r_7 = vcombine_u8(vget_high_u8(row_0), vget_low_u8(row_1)); 535 const uint8x16_t LLfedcba9876543210 = vcombine_u8(LLfedcba98, LL76543210); 536 const uint8x16_t LUfedcba9876543210 = vcombine_u8(LUfedcba98, LU76543210); [all...] |
transpose_neon.h | 51 b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)), 53 b0.val[1] = vcombine_u8(vreinterpret_u8_u32(vget_high_u32(a0)), 536 vtrnq_u8(vcombine_u8(*a0, *a4), vcombine_u8(*a1, *a5)); 538 vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7)); [all...] |
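In the transpose helper, combining rows i and i+4 before vtrnq_u8 lets one 128-bit transpose step serve two row pairs at once; later vtrnq_u16/vtrnq_u32 stages finish the 8x8 transpose. Sketch of that first stage only:

    #include <arm_neon.h>

    /* Sketch: stage one of an 8x8 byte transpose, as in transpose_neon.h. */
    static uint8x16x2_t transpose_stage1(uint8x8_t a0, uint8x8_t a1,
                                         uint8x8_t a4, uint8x8_t a5) {
      return vtrnq_u8(vcombine_u8(a0, a4), vcombine_u8(a1, a5));
    }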
loopfilter_neon.c | 38 *blimit_vec = vcombine_u8(vld1_dup_u8(blimit0), vld1_dup_u8(blimit1)); 39 *limit_vec = vcombine_u8(vld1_dup_u8(limit0), vld1_dup_u8(limit1)); 40 *thresh_vec = vcombine_u8(vld1_dup_u8(thresh0), vld1_dup_u8(thresh1)); 210 return vcombine_u8(vrshrn_n_u16(*sum0, 3), vrshrn_n_u16(*sum1, 3)); 227 t = vcombine_u8(vrshrn_n_u16(*sum0, 4), vrshrn_n_u16(*sum1, 4)); 275 *op2 = vcombine_u8(vrshrn_n_u16(sum0, 3), vrshrn_n_u16(sum1, 3)); 379 t = vcombine_u8(vrshrn_n_u16(sum0, 4), vrshrn_n_u16(sum1, 4)); [all...] |
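loopfilter_neon.c filters two edges per call: each edge's scalar threshold is broadcast across eight lanes with vld1_dup_u8, and the two broadcasts are combined so lanes 0-7 test edge 0 while lanes 8-15 test edge 1. A sketch of the setup (one threshold shown; the listing does the same for blimit, limit, and thresh):

    #include <arm_neon.h>

    /* Sketch: build a per-lane threshold vector covering two edges. */
    static uint8x16_t dual_threshold(const uint8_t *blimit0,
                                     const uint8_t *blimit1) {
      return vcombine_u8(vld1_dup_u8(blimit0), vld1_dup_u8(blimit1));
    }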
/external/skia/src/opts/ |
Sk4px_NEON.h | 56 return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec), 62 return Sk16b(vcombine_u8(vraddhn_u16(this->fLo.fVec, vrshrq_n_u16(this->fLo.fVec, 8)), |
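The Sk4px line at 62 is the classic fast divide-by-255: vrshrq_n_u16(x, 8) computes (x + 128) >> 8, and vraddhn_u16(a, b) narrows to (a + b + 128) >> 8, so the combined expression approximates x / 255 for 16-bit products. A sketch of one 8-lane half:

    #include <arm_neon.h>

    /* Sketch: x / 255 for x in [0, 255*255], as used by Sk4px's div255. */
    static uint8x8_t div255(uint16x8_t x) {
      return vraddhn_u16(x, vrshrq_n_u16(x, 8));
    }

The two 8-lane halves are then reassembled with vcombine_u8, exactly as in the listing.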
SkBitmapFilter_opts.h | [all...] |
SkSwizzler_opts.h | 391 ga.val[0] = vcombine_u8( |
/external/webp/src/dsp/ |
lossless_enc_neon.c | 41 return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)), 50 return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle), |
upsampling_neon.c | 124 vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1])); \ 132 vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1])); \ |
dec_neon.c | 73 *p1 = vcombine_u8(row0.val[0], row8.val[0]); 74 *p0 = vcombine_u8(row0.val[1], row8.val[1]); 75 *q0 = vcombine_u8(row0.val[2], row8.val[2]); 76 *q1 = vcombine_u8(row0.val[3], row8.val[3]); 165 *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride)); 166 *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride)); 167 *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride)); 168 *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride)); 169 *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride)); 170 *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride)) [all...] |
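dec_neon.c's chroma loop filter handles both planes at once by loading the U row into the low half and the V row into the high half of each q register, which is why every p*/q* load above pairs a u pointer with a v pointer. Sketch of one such load (the row argument is ours, e.g. -1 for p0):

    #include <arm_neon.h>

    /* Sketch: fetch one filter row for both chroma planes in one vector. */
    static uint8x16_t load_uv_row(const uint8_t *u, const uint8_t *v,
                                  int stride, int row) {
      return vcombine_u8(vld1_u8(u + row * stride),
                         vld1_u8(v + row * stride));
    }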
lossless_neon.c | 462 const uint8x16_t res2 = vcombine_u8(res, res); \ 511 return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)), 520 return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
denoising_neon.c | 288 const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); 293 vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); 388 const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); 393 vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); 409 vcombine_u8(v_running_avg_lo, v_running_avg_hi); |
/external/clang/test/CodeGen/ |
aarch64-neon-vcombine.c | 39 return vcombine_u8(low, high); |
/prebuilts/gcc/darwin-x86/aarch64/aarch64-linux-android-4.9/lib/gcc/aarch64-linux-android/4.9.x/include/ |
arm_neon.h | 4098 vcombine_u8 (uint8x8_t __a, uint8x8_t __b) function [all...] |
/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/lib/gcc/aarch64-linux-android/4.9.x/include/ |
arm_neon.h | 4098 vcombine_u8 (uint8x8_t __a, uint8x8_t __b) function [all...] |
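Both prebuilt headers define the intrinsic itself at line 4098. Semantically, vcombine_u8 returns a uint8x16_t whose lanes 0-7 come from __a and lanes 8-15 from __b; on 32-bit ARM it is often free, since a q register is physically the concatenation of two adjacent d registers. A scalar model for reference (our naming, not the header's implementation):

    #include <stdint.h>

    /* Sketch: what vcombine_u8(a, b) computes, lane by lane. */
    static void vcombine_u8_model(const uint8_t a[8], const uint8_t b[8],
                                  uint8_t out[16]) {
      for (int i = 0; i < 8; ++i) {
        out[i] = a[i];     /* low half  <- first operand  */
        out[i + 8] = b[i]; /* high half <- second operand */
      }
    }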
/external/arm-neon-tests/ |
compute_ref.gccarm | [all...] |
/frameworks/ml/nn/common/operations/internal/optimized/ |
depthwiseconv_uint8.h | [all...] |