/external/chromium_org/third_party/libvpx/

libvpx_intrinsics_neon.target.linux-arm.mk
  25  third_party/libvpx/source/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c \
  26  third_party/libvpx/source/libvpx/vp8/common/arm/neon/copymem_neon.c \
  27  third_party/libvpx/source/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c \
  28  third_party/libvpx/source/libvpx/vp8/common/arm/neon/dequant_idct_neon.c \
  29  third_party/libvpx/source/libvpx/vp8/common/arm/neon/dequantizeb_neon.c \
  30  third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c \
  31  third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c \
  32  third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c \
  33  third_party/libvpx/source/libvpx/vp8/common/arm/neon/iwalsh_neon.c \
  34  third_party/libvpx/source/libvpx/vp8/common/arm/neon/loopfilter_neon.c
  [all...]

/external/clang/test/CodeGen/

arm64_vca.c
   1  // RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
   9  // CHECK: llvm.aarch64.neon.facge.v2i32.v2f32
  16  // CHECK: llvm.aarch64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
  23  // CHECK: llvm.aarch64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
  30  // CHECK: llvm.aarch64.neon.facgt.v4i32.v4f32{{.*a2,.*a1}}
  36  // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a1,.*a2}}
  43  // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a2,.*a1}}
  50  // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a1,.*a2}}
  57  // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a2,.*a1}}

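Note the operand order in the CHECK patterns: several match {{.*a2,.*a1}} because AArch64 only provides FACGE/FACGT (absolute-value >= and >), so the "less-than" forms vcale/vcalt are lowered to the same instructions with the arguments swapped. A minimal sketch of the pattern under test (function name is illustrative, not from the file):

    #include <arm_neon.h>

    /* vcale(a, b) asks "is |a| <= |b|?".  There is no FACLE instruction,
     * so the compiler emits FACGE with the operands reversed, which is
     * why those CHECK lines match the arguments in a2, a1 order. */
    uint32x2_t abs_le(float32x2_t a, float32x2_t b) {
        return vcale_f32(a, b);
    }
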
arm-neon-misc.c
  12  // Radar 11998303: Avoid using i64 types for vld1q_lane and vst1q_lane Neon
  17  // CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
  21  // CHECK: call void @llvm.arm.neon.vst1.v1i64
  27  // CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
  30  // CHECK: call <1 x i64> @llvm.arm.neon.vld1.v1i64

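The comment on line 12 records the intent of this test: for 64-bit lanes, clang should emit whole-vector vld1/vst1 operations (which the CHECK lines verify) rather than i64 lane loads and stores. A rough sketch of the kind of source being compiled (function names are illustrative):

    #include <arm_neon.h>

    /* Loading one 64-bit lane is expected to lower to a full-vector
     * @llvm.arm.neon.vld1.v2i64 plus a lane insert, not an i64 lane load. */
    uint64x2_t load_lane(const uint64_t *p, uint64x2_t v) {
        return vld1q_lane_u64(p, v, 1);
    }

    /* A 64-bit lane store is likewise expected to go through vst1. */
    void store_lane(uint64_t *p, uint64x2_t v) {
        vst1q_lane_u64(p, v, 1);
    }
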
/external/libvpx/libvpx/vp8/

vp8cx_arm.mk
  36  #File list for neon
  38  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/fastquantizeb_neon$(ASM)
  39  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/picklpf_arm.c
  40  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/denoising_neon.c
  41  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/shortfdct_neon$(ASM)
  42  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/subtract_neon$(ASM)
  43  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_mse16x16_neon$(ASM)
  44  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_memcpy_neon$(ASM)
  45  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_shortwalsh4x4_neon$(ASM)

/external/llvm/test/CodeGen/AArch64/

arm64-cvt.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
  10  %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
  18  %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
  26  %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
  34  %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
  38  declare i32 @llvm.aarch64.neon.fcvtas.i32.f32(float) nounwind readnone
  39  declare i64 @llvm.aarch64.neon.fcvtas.i64.f32(float) nounwind readnone
  40  declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
  41  declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
  50  %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A
  [all...]

arm64-vaddlv.ll
   1  ; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
   9  %vaddlv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a1) nounwind
  19  %vaddlv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> %a1) nounwind
  23  declare i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32>) nounwind readnone
  25  declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>) nounwind readnone

arm64-vhsub.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
   8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  53  %tmp3 = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
  62  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  71  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  80  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2
  [all...]

arm64-vhadd.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
   8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  53  %tmp3 = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
  62  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  71  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  80  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2
  [all...]

arm64-vsqrt.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
   8  %tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
  17  %tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
  26  %tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
  30  declare <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float>, <2 x float>) nounwind readnone
  31  declare <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
  32  declare <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double>, <2 x double>) nounwind readnone
  40  %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
  49  %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
  58  %tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2
  [all...]

arm64-vmax.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
   8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  53  %tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
  57  declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
  58  declare <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
  59  declare <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16>, <4 x i16>) nounwind readnon
  [all...]

arm64-fixed-point-scalar-cvt-dagcombine.ll
   1  ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
  11  %fcvt_n = tail call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %vecext, i32 9)
  15  declare double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64, i32) nounwind readnone

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/

vp8cx_arm.mk
  36  #File list for neon
  38  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/fastquantizeb_neon$(ASM)
  39  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/picklpf_arm.c
  40  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/denoising_neon.c
  41  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/shortfdct_neon$(ASM)
  42  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/subtract_neon$(ASM)
  43  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_mse16x16_neon$(ASM)
  44  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_memcpy_neon$(ASM)
  45  VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_shortwalsh4x4_neon$(ASM)

/external/clang/test/Sema/

big-endian-neon-initializers.c
   1  // RUN: %clang_cc1 %s -triple arm64_be -target-feature +neon -verify -fsyntax-only -ffreestanding
   6  int32x4_t x = {1, 2, 3, 4}; // expected-warning{{vector initializers are not compatible with NEON intrinsics}} expected-note{{consider using vld1q_s32() to initialize a vector from memory, or vcombine_s32(vcreate_s32(), vcreate_s32()) to initialize from integer constants}}
   7  int16x4_t y = {1, 2, 3, 4}; // expected-warning{{vector initializers are not compatible with NEON intrinsics}} expected-note{{consider using vld1_s16() to initialize a vector from memory, or vcreate_s16() to initialize from an integer constant}}
   8  int64x2_t z = {1, 2}; // expected-warning{{vector initializers are not compatible with NEON intrinsics}} expected-note{{consider using vld1q_s64() to initialize a vector from memory, or vcombine_s64(vcreate_s64(), vcreate_s64()) to initialize from integer constants}}
   9  float32x2_t b = {1, 2}; // expected-warning{{vector initializers are not compatible with NEON intrinsics}} expected-note{{consider using vld1_f32() to initialize a vector from memory, or vcreate_f32() to initialize from an integer constant}}

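The warning fires because a braced initializer bakes in a lane order that differs between little- and big-endian targets, while the intrinsics named in the notes are endian-safe. A sketch of both recommended alternatives for the int32x4_t case (values are illustrative):

    #include <arm_neon.h>

    /* Endian-safe: load the lanes from memory. */
    int32x4_t from_memory(void) {
        const int32_t vals[4] = {1, 2, 3, 4};
        return vld1q_s32(vals);
    }

    /* Endian-safe: build from integer constants.  vcreate_s32 defines
     * lane 0 as the low 32 bits of its 64-bit argument. */
    int32x4_t from_constants(void) {
        int32x2_t lo = vcreate_s32(0x0000000200000001ULL); /* lanes 0, 1 */
        int32x2_t hi = vcreate_s32(0x0000000400000003ULL); /* lanes 2, 3 */
        return vcombine_s32(lo, hi);
    }
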
neon-vector-types.c
   1  // RUN: %clang_cc1 %s -triple armv7 -target-feature +neon -fsyntax-only -verify
   2  // RUN: %clang_cc1 %s -triple armv8 -target-feature +neon -fsyntax-only -verify
   9  // Define some valid Neon types.
  32  typedef __attribute__((neon_vector_type(1))) int int32x1_t; // expected-error{{Neon vector size must be 64 or 128 bits}}
  33  typedef __attribute__((neon_vector_type(3))) int int32x3_t; // expected-error{{Neon vector size must be 64 or 128 bits}}

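The rule being enforced is that element count times element width must come to exactly 64 bits (a D register) or 128 bits (a Q register), so with 32-bit int only two or four elements are accepted. A sketch of the valid forms (the typedef names are illustrative):

    /* 2 x 32 bits = 64 bits: legal, maps to a D register. */
    typedef __attribute__((neon_vector_type(2))) int my_int32x2_t;

    /* 4 x 32 bits = 128 bits: legal, maps to a Q register. */
    typedef __attribute__((neon_vector_type(4))) int my_int32x4_t;
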
/external/libvpx/libvpx/vpx_scale/

vpx_scale.mk
  12  #neon
  13  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copyframe_func_neon$(ASM)
  14  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copysrcframe_func_neon$(ASM)
  15  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_extendframeborders_neon$(ASM)
  16  SCALE_SRCS-$(HAVE_NEON) += arm/neon/yv12extend_arm.c

/external/llvm/test/Transforms/InstCombine/

neon-intrinsics.ll
   3  ; The alignment arguments for NEON load/store intrinsics can be increased
  15  %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* bitcast ([8 x i32]* @x to i8*), i32 1)
  20  call void @llvm.arm.neon.vst4.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
  24  declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*, i32) nounwind readonly
  25  declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind

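The transform under test: the trailing i32 argument of these intrinsics is the alignment, and InstCombine may raise the conservative align-1 value when the pointer is a global whose alignment is known to be higher. A C-level sketch of the same situation, assuming suitably aligned globals like the IR's @x and @y (names mirror the test but the C code is illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    /* The 32-byte alignment of these globals is visible to the optimizer,
     * so an align-1 argument initially placed on the vld4/vst4 intrinsics
     * can be increased, enabling aligned accesses. */
    __attribute__((aligned(32))) int32_t x[8];
    __attribute__((aligned(32))) int32_t y[8];

    void copy4(void) {
        int32x2x4_t v = vld4_s32(x);  /* 4 vectors x 2 lanes = 8 ints */
        vst4_s32(y, v);
    }
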
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/

vpx_scale.mk
  12  #neon
  13  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copyframe_func_neon$(ASM)
  14  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copysrcframe_func_neon$(ASM)
  15  SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_extendframeborders_neon$(ASM)
  16  SCALE_SRCS-$(HAVE_NEON) += arm/neon/yv12extend_arm.c

/external/llvm/test/CodeGen/ARM/

vld1.ll
   1  ; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
   3  ; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -regalloc=basic %s -o - \
  10  %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16)
  18  %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
  28  %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
  38  %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
  48  %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
  58  %tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %tmp0, i32 1)
  66  %tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0, i32 1)
  74  %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8
  [all...]

vabs.ll
   1  ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
   7  %tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
  15  %tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
  23  %tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
  39  %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
  47  %tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
  55  %tmp2 = call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %tmp1)
  67  declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) nounwind readnone
  68  declare <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16>) nounwind readnone
  69  declare <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32>) nounwind readnon
  [all...]

vhsub.ll
   1  ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  35  %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  44  %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  53  %tmp3 = call <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  62  %tmp3 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  71  %tmp3 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  80  %tmp3 = call <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2
  [all...]

vpadal.ll
   1  ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
   8  %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
  26  %tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
  35  %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
  44  %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
  53  %tmp3 = call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
  62  %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
  71  %tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
  80  %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2
  [all...]

vrec.ll
   1  ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
   7  %tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
  15  %tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
  23  %tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
  31  %tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
  35  declare <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32>) nounwind readnone
  36  declare <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32>) nounwind readnone
  38  declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) nounwind readnone
  39  declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
  46  %tmp3 = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2
  [all...]

vhadd.ll
   1  ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  35  %tmp3 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  44  %tmp3 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  53  %tmp3 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  62  %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  71  %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  80  %tmp3 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2
  [all...]

/external/chromium_org/third_party/webrtc/build/

arm_neon.gypi
   9  # This file sets correct neon flags. Include it if you want to build
  10  # source with neon intrinsics.
  27  '-mfpu=neon',

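On 32-bit ARM, a translation unit that includes <arm_neon.h> must itself be compiled with -mfpu=neon, which is what including this .gypi arranges for the sources listed under it. A minimal sketch of such a source file (function name is illustrative):

    /* Requires -mfpu=neon when targeting 32-bit ARM. */
    #include <arm_neon.h>

    float32x4_t add4(float32x4_t a, float32x4_t b) {
        return vaddq_f32(a, b);
    }
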
/external/llvm/test/Analysis/BasicAA/

intrinsics.ll
  10  ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR:#[0-9]+]]
  11  ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
  15  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
  16  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
  17  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
  25  ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR]]
  26  ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
  31  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
  32  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
  33  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwin
  [all...]