/external/libhevc/common/arm64/

ihevc_inter_pred_luma_vert_w16inp_w16out.s
    211  shrn v19.4h, v19.4s, #6
    230  shrn v20.4h, v20.4s, #6
    257  shrn v21.4h, v21.4s, #6
    275  shrn v31.4h, v31.4s, #6
    302  shrn v19.4h, v19.4s, #6
    323  shrn v20.4h, v20.4s, #6
    345  shrn v21.4h, v21.4s, #6
    360  shrn v31.4h, v31.4s, #6
    374  shrn v19.4h, v19.4s, #6
    387  shrn v20.4h, v20.4s, # [all...]

ihevc_intra_pred_chroma_mode_27_to_33.s
    143  shrn v5.8b, v2.8h,#5 //idx = pos >> 5
    276  shrn v3.8b, v2.8h,#5 //idx = pos >> 5
    373  shrn v3.8b, v2.8h,#5 //idx = pos >> 5

ihevc_intra_pred_filters_chroma_mode_19_to_25.s
    256  shrn v5.8b, v2.8h,#5 //idx = pos >> 5
    387  shrn v3.8b, v2.8h,#5 //idx = pos >> 5
    488  shrn v3.8b, v2.8h,#5 //idx = pos >> 5

ihevc_intra_pred_filters_luma_mode_19_to_25.s
    260  shrn v5.8b, v2.8h,#5 //idx = pos >> 5
    387  shrn v3.8b, v2.8h,#5 //idx = pos >> 5
    487  shrn v3.8b, v2.8h,#5 //idx = pos >> 5

ihevc_intra_pred_luma_mode_27_to_33.s
    148  shrn v5.8b, v2.8h,#5 //idx = pos >> 5
    281  shrn v3.8b, v2.8h,#5 //idx = pos >> 5
    379  shrn v3.8b, v2.8h,#5 //idx = pos >> 5
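All four libhevc angular intra-prediction kernels above use the same trick: the per-column position pos is kept in 16-bit lanes in 1/32-sample units, and a single shrn #5 both divides by 32 and narrows the result to the byte-sized reference index. A minimal intrinsics sketch of that step (variable names and the angle value are illustrative, not taken from the .s files):

/*
 * Sketch of the "idx = pos >> 5" step from the angular intra-pred
 * kernels listed above, written with NEON intrinsics instead of the
 * hand-written assembly. Names (pos, idx, fract) and the angle value
 * are illustrative, not taken from the libhevc sources.
 */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* pos = column * intra_pred_angle, in 1/32-sample units
       (illustrative angle of 26, columns 1..8). */
    int16_t pos_vals[8] = {26, 52, 78, 104, 130, 156, 182, 208};
    int16x8_t pos = vld1q_s16(pos_vals);

    /* shrn v5.8b, v2.8h, #5 : idx = pos >> 5, narrowed to bytes */
    int8x8_t idx = vshrn_n_s16(pos, 5);

    /* the low five bits are the fractional weight for the 2-tap blend */
    int16x8_t fract = vandq_s16(pos, vdupq_n_s16(31));

    int8_t  idx_out[8];
    int16_t fract_out[8];
    vst1_s8(idx_out, idx);
    vst1q_s16(fract_out, fract);
    for (int i = 0; i < 8; i++)
        printf("pos=%d idx=%d fract=%d\n",
               pos_vals[i], idx_out[i], fract_out[i]);
    return 0;
}

On the intrinsics path the compiler is free to pick the narrowing form itself; the hand-written .s files pin the shrn explicitly.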
/external/llvm/test/CodeGen/AArch64/

arm64-vecFold.ll
    9    ; CHECK: shrn.8b v0, v0, #5
    25   ; CHECK: shrn.4h v0, v0, #5
    41   ; CHECK: shrn.2s v0, v0, #5
    138  declare <8 x i8> @llvm.aarch64.neon.shrn.v8i8(<8 x i16>, i32) nounwind readnone
    139  declare <4 x i16> @llvm.aarch64.neon.shrn.v4i16(<4 x i32>, i32) nounwind readnone
    140  declare <2 x i32> @llvm.aarch64.neon.shrn.v2i32(<2 x i64>, i32) nounwind readnone

arm64-neon-simd-shift.ll
    215  ; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
    223  ; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
    231  ; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
    239  ; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
    247  ; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
    255  ; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19

arm64-vshift.ll
    679  ;CHECK: shrn.8b v0, {{v[0-9]+}}, #1
    688  ;CHECK: shrn.4h v0, {{v[0-9]+}}, #1
    697  ;CHECK: shrn.2s v0, {{v[0-9]+}}, #1
    737  declare <8 x i8> @llvm.aarch64.neon.shrn.v8i8(<8 x i16>, i32) nounwind readnone
    738  declare <4 x i16> @llvm.aarch64.neon.shrn.v4i16(<4 x i32>, i32) nounwind readnone
    739  declare <2 x i32> @llvm.aarch64.neon.shrn.v2i32(<2 x i64>, i32) nounwind readnone
    [all...]

/external/libjpeg-turbo/simd/

jsimd_arm64_neon.S
    479  shrn ROW1L.4h, v2.4s, #16
    487  shrn ROW2R.4h, v2.4s, #16 /* ROW6L.4h <-> ROW2R.4h */
    491  shrn ROW2L.4h, v2.4s, #16
    492  shrn ROW1R.4h, v6.4s, #16 /* ROW5L.4h <-> ROW1R.4h */
    501  shrn ROW3R.4h, v4.4s, #16 /* ROW7L.4h <-> ROW3R.4h */
    502  shrn ROW3L.4h, v10.4s, #16
    503  shrn ROW0L.4h, v12.4s, #16
    504  shrn ROW0R.4h, v6.4s, #16 /* ROW4L.4h <-> ROW0R.4h */
    528  shrn ROW5L.4h, v2.4s, #16 /* ROW5L.4h <-> ROW1R.4h */
    536  shrn ROW6R.4h, v2.4s, #1 [all...]
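The jsimd hits narrow 32-bit lanes by shifting right 16, so each 16-bit destination lane receives the upper half of its 32-bit source lane; a common way to drop the fractional part of a 16.16 fixed-point value. A model of just that narrowing (the data are made up; no claim is made about libjpeg-turbo's exact fixed-point scaling):

/*
 * Model of the "shrn ROWxx.4h, vN.4s, #16" narrowing used above: each
 * destination half-word is the upper half of its 32-bit source lane.
 */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int32_t sums[4] = {0x00010000, 0x00028000, -0x00010000, 0x00018000};
    int32x4_t acc = vld1q_s32(sums);

    /* shift right by 16 and narrow: truncating, no rounding */
    int16x4_t row = vshrn_n_s32(acc, 16);

    int16_t out[4];
    vst1_s16(out, row);
    for (int i = 0; i < 4; i++)
        printf("%d >> 16 = %d\n", sums[i], out[i]);
    return 0;
}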
/external/libavc/encoder/armv8/

ih264e_half_pel_av8.s
    370  shrn v21.4h, v20.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set2)
    372  shrn v20.4h, v26.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set1)
    388  shrn v28.4h, v2.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set3)
    395  shrn v29.4h, v26.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set4)
    421  shrn v28.4h, v22.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set5)
    486  shrn v21.4h, v20.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set2)
    488  shrn v20.4h, v26.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set1)
    504  shrn v28.4h, v6.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set3)
    511  shrn v29.4h, v26.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (set4)
    536  shrn v28.4h, v22.4s, #8 //// shift by 8 and later we will shift by 2 more with rounding (…) [all...]
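The //// comments explain the split: the 32-bit filter sums are narrowed with shrn #8 now, and the remaining shift by 2 with rounding happens later. Assuming that later step is a rounding shift such as srshr #2, i.e. (t + 2) >> 2, the split is exact because the rounding bias 512 is a multiple of 256, so the 8 low bits dropped by the narrow can never change the rounded result (provided the 32-bit sum fits in 24 signed bits, so the narrow does not wrap). A small check plus the intrinsic form:

/*
 * (x + 512) >> 10 in one step equals ((x >> 8) + 2) >> 2, which is why
 * the codec can narrow with shrn #8 first and round later. The srshr
 * assumption and the data below are illustrative, not taken from
 * ih264e_half_pel_av8.s.
 */
#include <arm_neon.h>
#include <assert.h>
#include <stdio.h>

int main(void) {
    /* exhaustive check on a small signed range (relies on arithmetic
       >> for negative values, as gcc/clang provide) */
    for (int32_t x = -4096; x <= 4096; x++)
        assert(((x + 512) >> 10) == (((x >> 8) + 2) >> 2));

    /* the same split with intrinsics, on made-up data */
    int32_t sums[4] = {1000, -1000, 511, 512};
    int16x4_t t   = vshrn_n_s32(vld1q_s32(sums), 8); /* shrn  vD.4h, vN.4s, #8 */
    int16x4_t out = vrshr_n_s16(t, 2);               /* srshr vD.4h, vD.4h, #2 */

    int16_t o[4];
    vst1_s16(o, out);
    printf("%d %d %d %d\n", o[0], o[1], o[2], o[3]);
    return 0;
}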
/external/llvm/test/MC/AArch64/

neon-simd-shift.s
    262  shrn v0.8b, v1.8h, #3
    263  shrn v0.4h, v1.4s, #3
    264  shrn v0.2s, v1.2d, #3
    269  // CHECK: shrn v0.8b, v1.8h, #3 // encoding: [0x20,0x84,0x0d,0x0f]
    270  // CHECK: shrn v0.4h, v1.4s, #3 // encoding: [0x20,0x84,0x1d,0x0f]
    271  // CHECK: shrn v0.2s, v1.2d, #3 // encoding: [0x20,0x84,0x3d,0x0f]
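The CHECK lines pin down how the shift amount is encoded. In the AdvSIMD shift-by-immediate class, bits [22:19] (immh) and [18:16] (immb) hold the immediate: the highest set bit of immh selects the destination element size, and for SHRN the shift is 2*esize - (immh:immb). A short decoder applied to the three encodings above, assembled from their little-endian byte lists:

/*
 * Decoding the immediates pinned by the CHECK encodings above.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned decode_shrn_shift(uint32_t insn) {
    unsigned immh = (insn >> 19) & 0xF;   /* bits [22:19] */
    unsigned immb = (insn >> 16) & 0x7;   /* bits [18:16] */
    unsigned imm7 = (immh << 3) | immb;
    unsigned hb = 0;                      /* highest set bit of immh */
    for (unsigned i = 0; i < 4; i++)
        if (immh & (1u << i))
            hb = i;
    unsigned esize = 8u << hb;            /* 8 -> .8b, 16 -> .4h, 32 -> .2s */
    return 2 * esize - imm7;              /* SHRN shift amount */
}

int main(void) {
    /* the three CHECK encodings as 32-bit words */
    uint32_t words[3] = {0x0F0D8420, 0x0F1D8420, 0x0F3D8420};
    for (int i = 0; i < 3; i++)
        printf("0x%08X -> #%u\n", words[i], decode_shrn_shift(words[i]));
    return 0;
}

All three decode to #3, matching the assembly in the test.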
arm64-advsimd.s
    [all...]

/frameworks/rs/cpu_ref/

rsCpuIntrinsics_advsimd_3DLUT.S
    89   shrn v14.4h, v8.4s, #8

rsCpuIntrinsics_advsimd_Convolve.S
    85   shrn v8.4h, v8.4s, #8

rsCpuIntrinsics_advsimd_Resize.S
    435  shrn v1.4h, v1.4s, #1
    444  shrn v2.4h, v2.4s, #1

/external/libvpx/libvpx/third_party/libyuv/source/

scale_neon64.cc
    629  "shrn v6.4h, v16.4s, #16 \n" [all...]
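Unlike the .s files above, the libyuv scaler reaches shrn through GCC extended inline assembly inside a .cc file, which is why the match is a quoted string. A stripped-down sketch of that embedding (register choices and data are illustrative, not libyuv's):

/*
 * Sketch of embedding shrn via extended inline asm in a C/C++ file,
 * mirroring the quoted string above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t src[4] = {0x00010000, 0x00028000, 0x00050000, 0x0007C000};
    int16_t dst[4];

    __asm__ volatile(
        "ld1  {v16.4s}, [%[src]]     \n"
        "shrn v6.4h, v16.4s, #16     \n"  /* keep the high 16 bits */
        "st1  {v6.4h}, [%[dst]]      \n"
        :
        : [src] "r"(src), [dst] "r"(dst)
        : "v6", "v16", "memory");

    printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}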
/external/libavc/common/armv8/

ih264_resi_trans_quant_av8.s
    567  shrn v0.4h, v22.4s, #1 //i4_value = (x0 + x1) >> 1;
    569  shrn v1.4h, v24.4s, #1 //i4_value = (x0 - x1) >> 1;
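Here the comments spell out the intent: the butterfly results x0 ± x1 live in 32-bit lanes, and shrn #1 halves them while narrowing back to 16 bits, matching the (x0 + x1) >> 1 of the reference C. A sketch with intrinsics (input values are made up):

/*
 * Sketch of the (x0 + x1) >> 1 / (x0 - x1) >> 1 step quoted above.
 */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int32_t x0v[4] = {100, -40, 7, 2048};
    int32_t x1v[4] = {60, -24, 9, -2048};
    int32x4_t x0 = vld1q_s32(x0v), x1 = vld1q_s32(x1v);

    /* shrn v0.4h, v22.4s, #1 : i4_value = (x0 + x1) >> 1 */
    int16x4_t sum  = vshrn_n_s32(vaddq_s32(x0, x1), 1);
    /* shrn v1.4h, v24.4s, #1 : i4_value = (x0 - x1) >> 1 */
    int16x4_t diff = vshrn_n_s32(vsubq_s32(x0, x1), 1);

    int16_t s[4], d[4];
    vst1_s16(s, sum);
    vst1_s16(d, diff);
    for (int i = 0; i < 4; i++)
        printf("x0=%d x1=%d sum=%d diff=%d\n", x0v[i], x1v[i], s[i], d[i]);
    return 0;
}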
/external/libyuv/files/source/

scale_neon64.cc
    [all...]

/external/vixl/src/vixl/a64/

logic-a64.cc
    2495  LogicVRegister Simulator::shrn(VectorFormat vform,  [function in class:vixl::Simulator] [all...]
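The vixl hit is the simulator's software model of the instruction rather than a use of it. The following is not vixl's code, just a scalar sketch of the same semantics for the 4s -> 4h case: shift each wide lane right and keep only the low 16 bits of the result. Because the SHRN shift is at most 16, the bits filled in at the top never survive the narrowing, which is why there are no separate signed/unsigned forms.

/*
 * Scalar model of shrn (4s -> 4h): truncating right shift and narrow.
 */
#include <stdint.h>
#include <stdio.h>

static void shrn_4h_4s(int16_t dst[4], const int32_t src[4], unsigned shift) {
    for (int i = 0; i < 4; i++)
        dst[i] = (int16_t)(uint16_t)((uint32_t)src[i] >> shift);
}

int main(void) {
    int32_t src[4] = {0x12345678, -1, 0x0000FFFF, 0x7FFF0000};
    int16_t dst[4];
    shrn_4h_4s(dst, src, 8);
    printf("%#x %#x %#x %#x\n",
           (unsigned)(uint16_t)dst[0], (unsigned)(uint16_t)dst[1],
           (unsigned)(uint16_t)dst[2], (unsigned)(uint16_t)dst[3]);
    return 0;
}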
macro-assembler-a64.h
    [all...]

/external/llvm/test/MC/Disassembler/AArch64/

arm64-advsimd.txt
    [all...]

/toolchain/binutils/binutils-2.25/opcodes/

aarch64-tbl.h
    374  /* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */
    382  /* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */
    [all...]

/external/valgrind/none/tests/arm64/

fp_and_simd.c
    [all...]

/external/vixl/doc/

supported-instructions.md
    [all...]

/external/vixl/test/

test-simulator-traces-a64.h
    [all...]