/external/llvm/test/CodeGen/AArch64/ |
arm64-uzp.ll |
    8   %tmp1 = load <8 x i8>, <8 x i8>* %A
    10  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
    11  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
    21  %tmp1 = load <4 x i16>, <4 x i16>* %A
    23  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    24  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    34  %tmp1 = load <16 x i8>, <16 x i8>* %A
    36  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
    37  %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
    47  %tmp1 = load <8 x i16>, <8 x i16>* % [all...]
arm64-vhsub.ll |
    6   %tmp1 = load <8 x i8>, <8 x i8>* %A
    8   %tmp3 = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <16 x i8>, <16 x i8>* %A
    17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    24  %tmp1 = load <4 x i16>, <4 x i16>* %A
    26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    33  %tmp1 = load <8 x i16>, <8 x i16>* %A
    35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    42  %tmp1 = load <2 x i32>, <2 x i32>* %A
    44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2 [all...]
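For reference, a minimal stand-alone function using the signed halving-subtract intrinsic that these excerpts call might look like the sketch below; the function name is illustrative, not taken from the test file.

    declare <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8>, <8 x i8>)

    define <8 x i8> @shsub8b_sketch(<8 x i8> %a, <8 x i8> %b) {
      ; per-lane (a - b) >> 1, computed without losing the intermediate bit
      %r = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %a, <8 x i8> %b)
      ret <8 x i8> %r
    }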
arm64-zip.ll |
    8   %tmp1 = load <8 x i8>, <8 x i8>* %A
    10  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
    11  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
    21  %tmp1 = load <4 x i16>, <4 x i16>* %A
    23  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
    24  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
    34  %tmp1 = load <16 x i8>, <16 x i8>* %A
    36  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
    37  %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
    47  %tmp1 = load <8 x i16>, <8 x i16>* % [all...]
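The masks in arm64-uzp.ll are the even/odd halves of a de-interleave and the masks in arm64-zip.ll are the two halves of an interleave. A minimal sketch of both patterns, with hypothetical function names; shuffles of this shape are what the AArch64 backend is expected to select uzp1/uzp2 and zip1/zip2 instructions for.

    define <8 x i8> @uzp_even_sketch(<8 x i8> %a, <8 x i8> %b) {
      ; keep the even lanes of the 16-lane concatenation of %a and %b
      %r = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
      ret <8 x i8> %r
    }

    define <8 x i8> @zip_low_sketch(<8 x i8> %a, <8 x i8> %b) {
      ; interleave the low halves of %a and %b: a0, b0, a1, b1, ...
      %r = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
      ret <8 x i8> %r
    }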
arm64-arm64-dead-def-elimination-flag.ll |
    8   %tmp1 = alloca i8
    9   %tmp2 = icmp eq i8* %tmp1, null
arm64-global-address.ll |
    12  %tmp1 = add i32 %tmp, %off
    13  ret i32 %tmp1
arm64-ldp-aa.ll |
    15  %tmp1 = load i32, i32* %add.ptr, align 4
    16  %add = add nsw i32 %tmp1, %tmp
    29  %tmp1 = load i64, i64* %add.ptr, align 8
    30  %add = add nsw i64 %tmp1, %tmp
    43  %tmp1 = load float, float* %add.ptr, align 4
    44  %add = fadd float %tmp, %tmp1
    57  %tmp1 = load double, double* %add.ptr, align 8
    58  %add = fadd double %tmp, %tmp1
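arm64-ldp-aa.ll exercises load-pair formation when the AArch64 load/store optimizer has to reason about aliasing; the excerpt keeps only the loads and the arithmetic on their results. A reduced sketch of the i32 case, with illustrative names and without the intervening stores the real test presumably uses to make alias analysis necessary: two adjacent 4-byte loads that can be merged into a single ldp.

    define i32 @ldp_int_sketch(i32* %p) {
      %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
      %a = load i32, i32* %p, align 4
      %b = load i32, i32* %add.ptr, align 4
      %add = add nsw i32 %a, %b
      ret i32 %add
    }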
/external/llvm/test/CodeGen/ARM/ |
vhsub.ll |
    6   %tmp1 = load <8 x i8>, <8 x i8>* %A
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <8 x i8>, <8 x i8>* %A
    35  %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    42  %tmp1 = load <4 x i16>, <4 x i16>* %A
    44  %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2 [all...]
vrec.ll |
    6   %tmp1 = load <2 x i32>, <2 x i32>* %A
    7   %tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
    14  %tmp1 = load <4 x i32>, <4 x i32>* %A
    15  %tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
    22  %tmp1 = load <2 x float>, <2 x float>* %A
    23  %tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
    30  %tmp1 = load <4 x float>, <4 x float>* %A
    31  %tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
    44  %tmp1 = load <2 x float>, <2 x float>* %A
    46  %tmp3 = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2 [all...]
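vrecpe produces a reciprocal estimate and vrecps a Newton-Raphson correction factor (2 - a*b); one refinement step is conventionally written as in the sketch below. The function name is illustrative and this is not the test's own code, only the shape of the intrinsic usage.

    declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>)
    declare <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float>, <2 x float>)

    define <2 x float> @recip_refined_sketch(<2 x float> %x) {
      ; e0 ~ 1/x, then e1 = e0 * (2 - x*e0) sharpens the estimate
      %e0 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %x)
      %step = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %x, <2 x float> %e0)
      %e1 = fmul <2 x float> %e0, %step
      ret <2 x float> %e1
    }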
2009-08-31-TwoRegShuffle.ll |
    7   %tmp1 = load <4 x i16>, <4 x i16>* %B
    8   %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32><i32 0, i32 0, i32 1, i32 1>
fold-const.ll |
    6   %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %conv, i1 true)
    9   %cast = trunc i64 %tmp1 to i32
hardfloat_neon.ll |
    4   %tmp1 = mul <16 x i8> %A, %B
    5   ret <16 x i8> %tmp1
ifcvt12.ll |
    5   %tmp1 = icmp eq i32 %a, 0
    6   br i1 %tmp1, label %cond_false, label %cond_true
ifcvt8.ll |
    12  %tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
    13  br i1 %tmp1, label %cond_true, label %cond_false
large-stack.ll |
    18  %tmp1 = load i32, i32* %tmp
    19  ret i32 %tmp1
ldr.ll |
    35  %tmp1 = sub i32 %base, 128
    36  %tmp2 = inttoptr i32 %tmp1 to i32*
    45  %tmp1 = add i32 %base, %offset
    46  %tmp2 = inttoptr i32 %tmp1 to i32*
    55  %tmp1 = shl i32 %offset, 2
    56  %tmp2 = add i32 %base, %tmp1
    66  %tmp1 = lshr i32 %offset, 2
    67  %tmp2 = add i32 %base, %tmp1
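ldr.ll builds addresses with integer arithmetic plus inttoptr to check which forms fold into ARM addressing modes. The register-plus-shifted-register case shown above, completed into a stand-alone function with an illustrative name:

    define i32 @ldr_shifted_sketch(i32 %base, i32 %offset) {
      %tmp1 = shl i32 %offset, 2
      %tmp2 = add i32 %base, %tmp1
      %p = inttoptr i32 %tmp2 to i32*
      ; expected to select an ldr with a reg + (reg << 2) addressing mode
      %v = load i32, i32* %p
      ret i32 %v
    }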
neon_div.ll |
    10  %tmp1 = load <8 x i8>, <8 x i8>* %A
    12  %tmp3 = sdiv <8 x i8> %tmp1, %tmp2
    24  %tmp1 = load <8 x i8>, <8 x i8>* %A
    26  %tmp3 = udiv <8 x i8> %tmp1, %tmp2
    34  %tmp1 = load <4 x i16>, <4 x i16>* %A
    36  %tmp3 = sdiv <4 x i16> %tmp1, %tmp2
    45  %tmp1 = load <4 x i16>, <4 x i16>* %A
    47  %tmp3 = udiv <4 x i16> %tmp1, %tmp2
tls1.ll |
    15  %tmp1 = load i32, i32* @i ; <i32> [#uses=1]
    16  ret i32 %tmp1
vcvt-v8.ll |
    5   %tmp1 = load <4 x float>, <4 x float>* %A
    6   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
    13  %tmp1 = load <2 x float>, <2 x float>* %A
    14  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
    21  %tmp1 = load <4 x float>, <4 x float>* %A
    22  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
    29  %tmp1 = load <2 x float>, <2 x float>* %A
    30  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
    37  %tmp1 = load <4 x float>, <4 x float>* %A
    38  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1) [all...]
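These vcvtas/vcvtns/vcvtps intrinsics convert float vectors to signed integers with an explicit rounding mode (to nearest with ties away, to nearest even, toward +infinity). A minimal call site with an illustrative function name:

    declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>)

    define <4 x i32> @cvt_round_away_sketch(<4 x float> %x) {
      ; float -> signed i32, rounding to nearest with ties away from zero
      %r = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %x)
      ret <4 x i32> %r
    }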
vqadd.ll |
    6   %tmp1 = load <8 x i8>, <8 x i8>* %A
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <1 x i64>, <1 x i64>* %A
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    42  %tmp1 = load <8 x i8>, <8 x i8>* %A
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2 [all...]
vqsub.ll |
    6   %tmp1 = load <8 x i8>, <8 x i8>* %A
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <1 x i64>, <1 x i64>* %A
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    42  %tmp1 = load <8 x i8>, <8 x i8>* %A
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2 [all...]
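vqadd.ll and vqsub.ll cover the signed and unsigned saturating intrinsics across the legal vector widths. A reduced signed-byte sketch combining both, with illustrative names; results clamp to the lane's range instead of wrapping:

    declare <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>)
    declare <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8>, <8 x i8>)

    define <8 x i8> @sat_add_sub_sketch(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
      ; a + b, then - c, saturating to [-128, 127] per lane
      %t = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %a, <8 x i8> %b)
      %r = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %t, <8 x i8> %c)
      ret <8 x i8> %r
    }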
/external/llvm/test/Transforms/InstCombine/ |
2008-05-08-LiveStoreDelete.ll |
    9   %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
    10  %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
    12  %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
    14  %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
    15  %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
    17  %tmp11 = call i32 (...) @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
2008-05-08-StrLenSink.ll |
    9   %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
    10  %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
    12  %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
    18  %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
    19  %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
    21  %tmp11 = call i32 (...) @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
/external/clang/test/CodeGen/ |
2008-01-07-UnusualIntSize.c |
    12  // CHECK: and i64 %[[TMP1:[^,]+]], 8589934591
    13  // CHECK-NOT: and i64 [[TMP1]], 8589934591
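The constant in these CHECK lines, 8589934591, is 2^33 - 1, so the test is verifying that the value is masked down to 33 bits exactly once. A hypothetical IR-level reduction of what the first CHECK matches, kept in LLVM IR to match the rest of this listing:

    define i64 @mask_to_33_bits_sketch(i64 %x) {
      ; 8589934591 = 0x1FFFFFFFF keeps only the low 33 bits
      %masked = and i64 %x, 8589934591
      ret i64 %masked
    }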
/external/llvm/test/Assembler/ |
half-constprop.ll |
    13  %tmp1 = load half, half* %b, align 2
    14  %add = fadd half %tmp, %tmp1
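The file name suggests this checks constant folding of half-precision arithmetic in the assembler/constant folder. A reduced sketch of the operation involved, with illustrative names; with constant operands, the fadd would be expected to fold away:

    define half @fadd_half_sketch(half* %a, half* %b) {
      %x = load half, half* %a, align 2
      %y = load half, half* %b, align 2
      %sum = fadd half %x, %y
      ret half %sum
    }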
/external/llvm/test/CodeGen/AMDGPU/ |
extload-private.ll |
    9   %tmp1 = load i8, i8* %tmp0
    10  %tmp2 = sext i8 %tmp1 to i32
    20  %tmp1 = load i8, i8* %tmp0
    21  %tmp2 = zext i8 %tmp1 to i32
    31  %tmp1 = load i16, i16* %tmp0
    32  %tmp2 = sext i16 %tmp1 to i32
    42  %tmp1 = load i16, i16* %tmp0
    43  %tmp2 = zext i16 %tmp1 to i32
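extload-private.ll pairs a narrow load with sext or zext so the backend has to select a sign- or zero-extending load. The i8 cases reduced to stand-alone functions with illustrative names; the real test loads from private (scratch) memory on AMDGPU, so the generic pointers here are a simplification:

    define i32 @sextload_i8_sketch(i8* %p) {
      ; sign-extending byte load
      %v = load i8, i8* %p
      %ext = sext i8 %v to i32
      ret i32 %ext
    }

    define i32 @zextload_i8_sketch(i8* %p) {
      ; zero-extending byte load
      %v = load i8, i8* %p
      %ext = zext i8 %v to i32
      ret i32 %ext
    }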