/external/libhevc/common/arm64/
ihevc_sao_edge_offset_class2.s
  335 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  340 Uxtl2 v22.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  424 Uxtl v26.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  433 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  443 Uxtl2 v28.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  451 Uxtl2 v18.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  513 Uxtl v20.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  518 Uxtl2 v5.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  [all...]
ihevc_sao_edge_offset_class3.s
  344 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  349 Uxtl2 v22.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  441 Uxtl v28.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  460 Uxtl2 v26.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  467 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  474 Uxtl2 v22.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  533 Uxtl v20.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  536 Uxtl2 v22.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  [all...]
ihevc_sao_edge_offset_class0_chroma.s
  241 Uxtl v18.8h, v19.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  250 Uxtl2 v19.8h, v19.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  281 Uxtl v28.8h, v30.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  295 Uxtl2 v30.8h, v30.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  424 Uxtl v18.8h, v19.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  426 Uxtl v24.8h, v30.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
ihevc_sao_edge_offset_class0.s
  199 Uxtl v18.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  211 Uxtl v0.8h, v26.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  226 Uxtl2 v21.8h, v17.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  240 Uxtl2 v28.8h, v26.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  319 Uxtl v28.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
ihevc_sao_edge_offset_class2_chroma.s
  469 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  473 Uxtl2 v18.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  612 Uxtl v28.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  628 Uxtl2 v26.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  642 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  [all...]
ihevc_sao_edge_offset_class3_chroma.s
  454 Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  467 Uxtl2 v18.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  602 Uxtl v28.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  610 Uxtl2 v26.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  614 Uxtl v20.8h, v16.8b //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  627 Uxtl2 v18.8h, v16.16b //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  [all...]
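The Uxtl/Uxtl2 pairs in the libhevc AArch64 SAO files above are the assembly counterparts of the vmovl_u8 calls named in their comments: the low and high halves of a 16-pixel row are zero-extended to 16 bits and reinterpreted as signed so edge offsets can be added. A minimal intrinsics sketch of that shape (the per-pixel offset lookup of the real kernels is collapsed into a single offset vector here, and the function name is illustrative):

#include <arm_neon.h>

/* Widen one 16-pixel row to two s16 vectors, add a signed offset, then
 * saturate back to u8 -- the shape of the SAO edge-offset inner loop. */
static inline uint8x16_t sao_add_offset_row(uint8x16_t pu1_cur_row, int16x8_t offset)
{
    /* vmovl_u8 zero-extends u8 -> u16; the reinterpret to s16 is safe because
     * every lane is still in 0..255, and it lets signed offsets be added. */
    int16x8_t lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)));
    int16x8_t hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)));

    lo = vaddq_s16(lo, offset);
    hi = vaddq_s16(hi, offset);

    /* vqmovun_s16 clamps to the 0..255 pixel range and narrows back to u8. */
    return vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi));
}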
/external/libhevc/common/arm/
ihevc_sao_edge_offset_class0_chroma.s
  228 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  234 VMOVL.U8 Q6,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  256 VMOVL.U8 Q14,D30 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  269 VMOVL.U8 Q15,D31 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
  393 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
  395 VMOVL.U8 Q12,D30 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
ihevc_intra_pred_filters_neon_intr.c
  [all...]
/external/webp/src/dsp/ |
dec_neon.c
  380 return vreinterpretq_s16_u16(vmovl_u8(v));
  [all...]
yuv_neon.c
  28 const uint16x8_t r = vmovl_u8(R);
  29 const uint16x8_t g = vmovl_u8(G);
  30 const uint16x8_t b = vmovl_u8(B);
lossless_neon.c
  171 const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(avg));
  419 const uint16x8_t res16 = vmovl_u8(res); \
  428 uint16x8_t L = vmovl_u8(LOAD_U32_AS_U8(out[-1]));
  458 const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(LOW_OR_HI(avg))); \
filters_neon.c
  34 #define U8_TO_S16(A) vreinterpretq_s16_u16(vmovl_u8(A))
enc_neon.c
  41 return vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(v)));
  [all...]
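yuv_neon.c above widens the R, G and B rows with vmovl_u8 but keeps them unsigned, since the weighted luma sum that follows never goes negative. A sketch of that use; the fixed-point weights below are illustrative BT.601-style values assumed for the example, not values copied from libwebp:

#include <arm_neon.h>

/* Widen 8 R/G/B bytes to u16 lanes and form a weighted luma value:
 * y = (77*r + 150*g + 29*b + 128) >> 8, computed in u32 lanes. */
static inline uint16x8_t rgb_to_luma8(uint8x8_t R, uint8x8_t G, uint8x8_t B)
{
    const uint16x8_t r = vmovl_u8(R);   /* u8 -> u16, no reinterpret needed */
    const uint16x8_t g = vmovl_u8(G);
    const uint16x8_t b = vmovl_u8(B);

    uint32x4_t lo = vmull_n_u16(vget_low_u16(r), 77);
    uint32x4_t hi = vmull_n_u16(vget_high_u16(r), 77);
    lo = vmlal_n_u16(lo, vget_low_u16(g), 150);
    hi = vmlal_n_u16(hi, vget_high_u16(g), 150);
    lo = vmlal_n_u16(lo, vget_low_u16(b), 29);
    hi = vmlal_n_u16(hi, vget_high_u16(b), 29);

    /* vrshrn_n_u32 does the rounding (+128) and the >> 8 in one step. */
    return vcombine_u16(vrshrn_n_u32(lo, 8), vrshrn_n_u32(hi, 8));
}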
/external/libvpx/libvpx/vpx_dsp/arm/ |
idct_neon.h
  245 c0 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b0)), a0, 6);
  246 c1 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b1)), a1, 6);
  247 c2 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b2)), a2, 6);
  248 c3 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b3)), a3, 6);
  249 c4 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b4)), a4, 6);
  250 c5 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b5)), a5, 6);
  251 c6 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b6)), a6, 6);
  252 c7 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b7)), a7, 6);
  [all...]
idct32x32_add_neon.c
  102 d[0] = vmovl_u8(vqmovun_s16(q0));
  103 d[1] = vmovl_u8(vqmovun_s16(q1));
  104 d[2] = vmovl_u8(vqmovun_s16(q2));
  105 d[3] = vmovl_u8(vqmovun_s16(q3));
highbd_loopfilter_neon.c
  21 *blimit_vec = vmovl_u8(vld1_dup_u8(blimit));
  22 *limit_vec = vmovl_u8(vld1_dup_u8(limit));
  23 *thresh_vec = vmovl_u8(vld1_dup_u8(thresh));
intrapred_neon.c
  877 return vreinterpretq_s16_u16(vmovl_u8(v));
  [all...]
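idct_neon.h above widens each predicted pixel row with vmovl_u8 and folds the inverse-transform output in with vrsraq_n_s16, so the rounding, the shift by 6, and the accumulate happen in one instruction. A minimal sketch of one row of that reconstruction step (the function and variable names are illustrative, not libvpx's):

#include <arm_neon.h>

/* Add a 16-bit IDCT residual (scaled by 64) to an 8-bit prediction row
 * and store the clamped result, mirroring the vrsraq_n_s16 pattern above. */
static inline void reconstruct_row8(uint8_t *dst, const int16_t *residual)
{
    const uint8x8_t pred = vld1_u8(dst);
    const int16x8_t res  = vld1q_s16(residual);

    /* Widen prediction to s16, then add (residual + 32) >> 6 in one step. */
    const int16x8_t sum = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(pred)), res, 6);

    /* Saturate back to 0..255 and store. */
    vst1_u8(dst, vqmovun_s16(sum));
}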
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/ |
optimized_ops.h
  425 input_val[0] = vreinterpretq_s16_u16(vmovl_u8(low));
  426 input_val[1] = vreinterpretq_s16_u16(vmovl_u8(high));
  433 filter_val[k][0] = vreinterpretq_s16_u16(vmovl_u8(low));
  434 filter_val[k][1] = vreinterpretq_s16_u16(vmovl_u8(high));
  457 input_val = vreinterpretq_s16_u16(vmovl_u8(input_val_u8));
  461 filter_val[k] = vreinterpretq_s16_u16(vmovl_u8(filter_val_u8[k]));
  [all...]
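In optimized_ops.h the quantized uint8 activations and weights are widened to int16 via vmovl_u8 so that a zero-point offset can be applied before the signed multiply-accumulates. A simplified sketch of just that widening step (the real kernels interleave it with the accumulation loop; the name and offset handling here are illustrative):

#include <arm_neon.h>

/* Widen 8 quantized uint8 values to int16 lanes and subtract the zero point,
 * producing lanes ready for vmlal_s16-style accumulation. The u16 -> s16
 * reinterpret is lossless because each lane is at most 255. */
static inline int16x8_t dequant_to_s16(const uint8_t *src, int16_t zero_point)
{
    const uint8x8_t raw = vld1_u8(src);
    const int16x8_t val = vreinterpretq_s16_u16(vmovl_u8(raw));
    return vsubq_s16(val, vdupq_n_s16(zero_point));
}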
/external/skia/src/opts/ |
SkNx_neon.h
  596 uint16x8_t _16 = vmovl_u8(src.fVec);
  631 return vget_low_u16(vmovl_u8(src.fVec));
  635 return vmovl_u8(src.fVec);
/external/skqp/src/opts/ |
SkNx_neon.h
  586 uint16x8_t _16 = vmovl_u8(src.fVec);
  621 return vget_low_u16(vmovl_u8(src.fVec));
  625 return vmovl_u8(src.fVec);
/external/libvpx/libvpx/vp8/common/arm/neon/ |
vp8_loopfilter_neon.c
  84 q4u16 = vmovl_u8(vget_low_u8(q10));
/prebuilts/misc/darwin-x86/analyzer/lib/clang/3.3/include/ |
arm_neon.h
  358 __ai uint16x8_t vmovl_u8(uint8x8_t __a) { function
  435 return __a + (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__b, __c)); }
  441 return __a + vmovl_u8(vabd_u8(__b, __c)); }
  448 return (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__a, __b)); }
  454 return vmovl_u8(vabd_u8(__a, __b)); }
  534 return vmovl_u8(__a) + vmovl_u8(__b); }
  547 return __a + vmovl_u8(__b); }
  [all...]
/prebuilts/misc/linux-x86/analyzer/lib/clang/3.3/include/ |
arm_neon.h
  358 __ai uint16x8_t vmovl_u8(uint8x8_t __a) { function
  435 return __a + (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__b, __c)); }
  441 return __a + vmovl_u8(vabd_u8(__b, __c)); }
  448 return (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__a, __b)); }
  454 return vmovl_u8(vabd_u8(__a, __b)); }
  534 return vmovl_u8(__a) + vmovl_u8(__b); }
  547 return __a + vmovl_u8(__b); }
  [all...]
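Both prebuilt clang analyzer headers carry the same portable definition of vmovl_u8 and build the widening add and absolute-difference intrinsics (vaddl_u8, vabal_u8, vabdl_u8 and friends) from it by plain vector arithmetic on the widened operands. A small self-contained usage example of the intrinsic itself:

#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    const uint8_t bytes[8] = { 0, 1, 2, 250, 251, 252, 253, 255 };
    uint16_t widened[8];

    /* vmovl_u8 zero-extends each u8 lane to a u16 lane (UXTL on AArch64,
     * VMOVL.U8 on 32-bit ARM). */
    vst1q_u16(widened, vmovl_u8(vld1_u8(bytes)));

    for (int i = 0; i < 8; ++i)
        printf("%u ", widened[i]);
    printf("\n");
    return 0;
}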
/external/tensorflow/tensorflow/core/kernels/ |
quantization_utils.h
  549 vreinterpretq_s16_u16(vmovl_u8(input_value_8x8));
  [all...]
/external/arm-neon-tests/ |
compute_ref.gccarm