/external/llvm/test/Transforms/SCCP/ |
2009-01-14-IPSCCP-Invoke.ll |
     6  %tmp1 = invoke i32 @f()
    15  ret i32 %tmp1 |
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/ |
h264bsd_interpolate_chroma_ver.s |
    53  tmp1 RN 7   (label)
    96  ADD tmp1, x0, chrPW     ;// tmp1 = x0 + chromaPartWidth
    97  CMP tmp1, width         ;// x0 + chromaPartWidth > width
   104  ADD tmp1, y0, chrPH     ;// tmp1 = y0 + chromaPartHeight
   105  ADD tmp1, tmp1, #1      ;// tmp1 = y0 + chromaPartHeight + 1
   106  CMP tmp1, heigh [all...] |
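The ADD/CMP pairs above implement the decoder's edge test: if the chroma block, plus the extra row the vertical filter reads, falls outside the picture, a padded slow path is taken. A minimal C sketch of that test, with names taken from the listing's comments (the helper itself is hypothetical, not the decoder's actual control flow):

    #include <stdbool.h>

    /* Hypothetical helper mirroring the ADD/CMP sequence above. */
    static bool chroma_ref_inside(int x0, int y0,
                                  int chromaPartWidth, int chromaPartHeight,
                                  int width, int height)
    {
        if (x0 + chromaPartWidth > width)        /* lines 96-97   */
            return false;
        if (y0 + chromaPartHeight + 1 > height)  /* lines 104-106; the +1
                                                    is the extra filter row */
            return false;
        return true;                             /* fast path, no padding */
    }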
h264bsd_interpolate_mid_hor.s |
    44  tmp1 RN 8   (label)
    93  SMUAD  tmp1, x_2_0, mult_20_01
    98  SMLAD  tmp1, x_3_1, mult_20_m5, tmp1
   104  SMLABB tmp1, x_6_4, mult_20_m5, tmp1
   109  SMLABB tmp1, x_7_5, mult_20_01, tmp1
   116  STR    tmp1, [mb], #4
   124  SMUAD  tmp1, x_6_4, mult_20_01 [all...] |
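SMUAD/SMLAD perform two signed 16x16 multiplies per instruction and sum them, which is how this routine evaluates H.264's 6-tap (1, -5, 20, 20, -5, 1) half-pel filter two taps at a time; the mult_20_01 and mult_20_m5 operands appear to be the constant pairs (20, 1) and (20, -5) packed into one register. A plain-C model of the arithmetic only, ignoring the packed-register plumbing:

    #include <stdint.h>

    /* SMUAD: sum of the two signed 16-bit lane products of a and b. */
    static int32_t smuad(uint32_t a, uint32_t b)
    {
        return (int32_t)(int16_t)(a & 0xffffu) * (int16_t)(b & 0xffffu)
             + (int32_t)(int16_t)(a >> 16)     * (int16_t)(b >> 16);
    }

    /* The filter the SMUAD/SMLAD chain is accumulating: */
    static int32_t six_tap(const int16_t x[6])
    {
        return x[0] - 5 * x[1] + 20 * x[2] + 20 * x[3] - 5 * x[4] + x[5];
    }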
/external/llvm/test/CodeGen/AArch64/ |
arm64-vshift.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <8 x i8>, <8 x i8>* %A
    35  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    42  %tmp1 = load <4 x i16>, <4 x i16>* %A
    44  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2 [all...] |
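The sqshl/uqshl intrinsics are saturating shifts: a result that would overflow the lane clamps to the lane's extreme value instead of wrapping. A scalar model of one signed i8 lane, assuming a shift in 0..7 (the real instruction also accepts negative shift amounts, which shift right):

    #include <stdint.h>

    static int8_t sqshl_lane_i8(int8_t v, int shift)
    {
        int32_t r = (int32_t)v << shift;      /* widen, then shift    */
        if (r > INT8_MAX) return INT8_MAX;    /* saturate, don't wrap */
        if (r < INT8_MIN) return INT8_MIN;
        return (int8_t)r;
    }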
arm64-vsqrt.ll |
     6  %tmp1 = load <2 x float>, <2 x float>* %A
     8  %tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
    15  %tmp1 = load <4 x float>, <4 x float>* %A
    17  %tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
    24  %tmp1 = load <2 x double>, <2 x double>* %A
    26  %tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
    38  %tmp1 = load <2 x float>, <2 x float>* %A
    40  %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
    47  %tmp1 = load <4 x float>, <4 x float>* %A
    49  %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2 [all...] |
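frecps and frsqrts are the Newton-Raphson refinement steps that pair with the rough estimate instructions (frecpe/frsqrte): FRECPS(a, b) = 2 - a*b and FRSQRTS(a, b) = (3 - a*b) / 2. One refinement iteration in scalar C:

    /* Refine an estimate r of 1/a:       r' = r * FRECPS(a, r)     */
    static float recip_step(float a, float r)
    {
        return r * (2.0f - a * r);
    }

    /* Refine an estimate r of 1/sqrt(a): r' = r * FRSQRTS(a, r*r)  */
    static float rsqrt_step(float a, float r)
    {
        return r * ((3.0f - a * r * r) * 0.5f);
    }

Each step roughly doubles the number of correct bits, so a couple of iterations after the estimate instruction reach near-full single precision.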
arm64-trn.ll |
     8  %tmp1 = load <8 x i8>, <8 x i8>* %A
    10  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
    11  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
    21  %tmp1 = load <4 x i16>, <4 x i16>* %A
    23  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
    24  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
    35  %tmp1 = load <2 x i32>, <2 x i32>* %A
    37  %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
    38  %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
    48  %tmp1 = load <2 x float>, <2 x float>* % [all...] |
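These shuffle masks are the TRN1/TRN2 patterns the test expects to select: TRN1 interleaves the even-numbered lanes of the two inputs, TRN2 the odd-numbered ones. The same masks, spelled out in scalar C for the 8 x i8 case:

    #include <stdint.h>

    static void trn1_i8x8(const int8_t a[8], const int8_t b[8], int8_t out[8])
    {
        for (int i = 0; i < 4; i++) {
            out[2 * i]     = a[2 * i];       /* mask indices 0, 2, 4, 6    */
            out[2 * i + 1] = b[2 * i];       /* mask indices 8, 10, 12, 14 */
        }
    }

    static void trn2_i8x8(const int8_t a[8], const int8_t b[8], int8_t out[8])
    {
        for (int i = 0; i < 4; i++) {
            out[2 * i]     = a[2 * i + 1];   /* mask indices 1, 3, 5, 7    */
            out[2 * i + 1] = b[2 * i + 1];   /* mask indices 9, 11, 13, 15 */
        }
    }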
arm64-vbitwise.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     7  %tmp3 = call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %tmp1)
    14  %tmp1 = load <16 x i8>, <16 x i8>* %A
    15  %tmp3 = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %tmp1)
    25  %tmp1 = load <8 x i8>, <8 x i8>* %A
    26  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
    33  %tmp1 = load <8 x i8>, <8 x i8>* %A
    34  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
    41  %tmp1 = load <4 x i16>, <4 x i16>* %A
    42  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32 [all...] |
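rbit reverses the bit order within each byte lane; the sext/zext tests that follow just check the widening moves. A per-byte model of the bit reversal:

    #include <stdint.h>

    static uint8_t rbit_u8(uint8_t v)
    {
        uint8_t r = 0;
        for (int i = 0; i < 8; i++) {
            r = (uint8_t)((r << 1) | (v & 1));   /* peel bits LSB-first */
            v >>= 1;
        }
        return r;
    }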
arm64-volatile.ll |
    10  %tmp1 = load i64, i64* %add.ptr1, align 8
    11  %add = add nsw i64 %tmp1, %tmp
    24  %tmp1 = load volatile i64, i64* %add.ptr1, align 8
    25  %add = add nsw i64 %tmp1, %tmp |
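The only difference between the two functions is the volatile qualifier, which is the point of the test: the plain pair of loads may be merged or reordered by the backend, while the volatile ones must be issued exactly as written. In C terms:

    #include <stdint.h>

    int64_t sum_plain(const int64_t *p)
    {
        return p[0] + p[1];          /* backend may combine the loads */
    }

    int64_t sum_volatile(const volatile int64_t *p)
    {
        return p[0] + p[1];          /* must stay two separate loads  */
    }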
/external/webrtc/webrtc/modules/audio_coding/codecs/ilbc/ |
state_construct.c |
    36  int16_t *tmp1, *tmp2, *tmp3;   (local)
    56  tmp1 = sampleVal;
    63  *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 2097152) >>
    65  tmp1++;
    72  *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 262144) >>
    74  tmp1++;
    81  *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 65536) >>
    83  tmp1++;
   105  tmp1 = &sampleAr[len-1];
   109  (*tmp3) = (*tmp1) + (*tmp2) [all...] |
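Each of the three scaling statements is the same round-to-nearest fixed-point multiply: add half of the final divisor, then shift. The shift counts are cut off in the listing, but the constants 2097152 = 2^21, 262144 = 2^18 and 65536 = 2^16 show that each branch rounds at a different Q-format of maxVal. The generic shape, as a sketch (assuming the product fits in 32 bits, as the fixed-point design intends):

    #include <stdint.h>

    /* Round-to-nearest scaling: bias by 2^(k-1) before shifting by k. */
    static int16_t scale_round(int32_t maxVal, int16_t coeff, int k)
    {
        return (int16_t)((maxVal * coeff + (1 << (k - 1))) >> k);
    }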
/prebuilts/go/darwin-x86/test/bench/shootout/ |
pidigits.go |
    50  tmp1 = big.NewInt(0)
    67  tmp1.Lsh(numer, 1)
    68  tmp1.Add(tmp1, numer)
    69  tmp1.Add(tmp1, accum)
    70  tmp1.DivMod(tmp1, denom, tmp2)
    80  return tmp1.Int64()
    87  tmp1.Lsh(numer, 1
    49  tmp1 = big.NewInt(0)   (var) [all...] |
/prebuilts/go/linux-x86/test/bench/shootout/ |
pidigits.go |
    50  tmp1 = big.NewInt(0)
    67  tmp1.Lsh(numer, 1)
    68  tmp1.Add(tmp1, numer)
    69  tmp1.Add(tmp1, accum)
    70  tmp1.DivMod(tmp1, denom, tmp2)
    80  return tmp1.Int64()
    87  tmp1.Lsh(numer, 1
    49  tmp1 = big.NewInt(0)   (var) [all...] |
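Both prebuilt copies carry the same code. Lines 67-70 are the digit-extraction step of the pi spigot algorithm: shift-and-add builds 3*numer + accum, and DivMod splits it into the candidate digit and the running remainder. The arithmetic, sketched with plain 64-bit integers (the real code needs math/big because the terms grow without bound):

    #include <stdint.h>

    static int64_t extract_digit(int64_t numer, int64_t accum,
                                 int64_t denom, int64_t *rem)
    {
        int64_t t = (numer << 1) + numer + accum;   /* Lsh + two Adds */
        *rem = t % denom;                           /* DivMod, part 2 */
        return t / denom;                           /* DivMod, part 1 */
    }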
/external/llvm/test/Transforms/Reassociate/ |
repeats.ll |
    14  %tmp1 = and i2 %x, %x
    15  %tmp2 = and i2 %tmp1, %x
    23  %tmp1 = add i2 %x, %x
    24  %tmp2 = add i2 %tmp1, %x
    32  %tmp1 = add i2 1, 1
    33  %tmp2 = add i2 %tmp1, 1
    40  %tmp1 = mul i8 3, 3
    41  %tmp2 = mul i8 %tmp1, 3
    54  %tmp1 = mul i3 %x, %x
    55  %tmp2 = mul i3 %tmp1, % [all...] |
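These instruction pairs feed Reassociate repeated operands and check how it collapses them: AND is idempotent, repeated ADDs become a multiply, and repeated MULs become a power that can be built by squaring (a win once the exponent exceeds three). The tests use deliberately tiny i2/i3 types to probe overflow edge cases; with ordinary ints the equivalences look like:

    static int collapse_and(int x) { return x & x & x; }  /* folds to x     */
    static int collapse_add(int x) { return x + x + x; }  /* folds to 3 * x */
    static int collapse_mul(int x) { return x * x * x; }  /* a power of x   */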
/external/llvm/test/CodeGen/ARM/ |
vfcmp.ll |
    10  %tmp1 = load <2 x float>, <2 x float>* %A
    12  %tmp3 = fcmp une <2 x float> %tmp1, %tmp2
    21  %tmp1 = load <2 x float>, <2 x float>* %A
    23  %tmp3 = fcmp olt <2 x float> %tmp1, %tmp2
    32  %tmp1 = load <2 x float>, <2 x float>* %A
    34  %tmp3 = fcmp ole <2 x float> %tmp1, %tmp2
    44  %tmp1 = load <2 x float>, <2 x float>* %A
    46  %tmp3 = fcmp uge <2 x float> %tmp1, %tmp2
    56  %tmp1 = load <2 x float>, <2 x float>* %A
    58  %tmp3 = fcmp ule <2 x float> %tmp1, %tmp [all...] |
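The predicate letters encode NaN behaviour, which is what this test exercises: ordered forms (olt, ole) are false when either operand is NaN, unordered forms (une, uge, ule) are true. C's comparison operators map onto them directly:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        float n = nanf("");
        printf("%d\n", n != 1.0f);   /* 1: != is fcmp une, true for NaN  */
        printf("%d\n", n <  1.0f);   /* 0: <  is fcmp olt, false for NaN */
        return 0;
    }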
vext.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
    15  %tmp1 = load <8 x i8>, <8 x i8>* %A
    17  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
    24  %tmp1 = load <16 x i8>, <16 x i8>* %A
    26  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
    33  %tmp1 = load <16 x i8>, <16 x i8>* %A
    35  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
    42  %tmp1 = load <4 x i16>, <4 x i16>* %A
    44  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6 [all...] |
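A shuffle mask of consecutive indices selects a contiguous window out of the concatenation of the two inputs, which is exactly VEXT with the window's start as the immediate (the <3, 4, ..., 10> mask above is vext #3). Scalar equivalent:

    #include <stdint.h>
    #include <string.h>

    /* Bytes n .. n+7 of the 16-byte concatenation a:b, for 0 <= n <= 8. */
    static void vext_u8x8(const uint8_t a[8], const uint8_t b[8], int n,
                          uint8_t out[8])
    {
        uint8_t cat[16];
        memcpy(cat, a, 8);
        memcpy(cat + 8, b, 8);
        memcpy(out, cat + n, 8);
    }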
vldlane.ll |
    10  %tmp1 = load <8 x i8>, <8 x i8>* %B
    12  %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 3
    20  %tmp1 = load <4 x i16>, <4 x i16>* %B
    22  %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 2
    30  %tmp1 = load <2 x i32>, <2 x i32>* %B
    32  %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    40  %tmp1 = load <2 x i32>, <2 x i32>* %B
    42  %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    49  %tmp1 = load <2 x float>, <2 x float>* %B
    51  %tmp3 = insertelement <2 x float> %tmp1, float %tmp2, i32 [all...] |
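The load/insertelement pairs are the pattern that selects a lane load (vld1.8 {d0[3]} and friends): fetch one scalar from memory and drop it into a single lane of an existing vector, leaving the other lanes alone. Scalar equivalent:

    #include <stdint.h>

    static void vld1_lane_i8x8(const int8_t *p, int8_t vec[8], int lane)
    {
        vec[lane] = *p;      /* rest of the vector is left untouched */
    }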
arm-asm.ll |
     5  %tmp1 = tail call void (i8*)* (void (i8*)*) asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (i8*)* null ) ; <void (i8*)*> [#uses=0] |
mls.ll |
     6  %tmp1 = mul i32 %a, %b
     7  %tmp2 = sub i32 %c, %tmp1
    13  %tmp1 = mul i32 %a, %b
    14  %tmp2 = sub i32 %tmp1, %c |
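The two functions differ only in operand order, which decides whether MLS applies: the instruction computes accumulator minus product, so c - a*b matches it directly while a*b - c does not.

    #include <stdint.h>

    int32_t mls_shape(int32_t a, int32_t b, int32_t c)
    {
        return c - a * b;      /* selectable as: mls r, a, b, c */
    }

    int32_t not_mls_shape(int32_t a, int32_t b, int32_t c)
    {
        return a * b - c;      /* product-minus-accumulator: needs
                                  a mul followed by a sub instead */
    }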
smulw.ll |
    11  %tmp1 = sext i16 %b to i32
    12  %tmp2 = mul i32 %a, %tmp1
    21  %tmp1 = sext i16 %b to i32
    22  %tmp2 = mul i32 %a, %tmp1 |
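SMULW multiplies a 32-bit register by a sign-extended 16-bit value and keeps the top 32 bits of the 48-bit product, i.e. (a * b) >> 16; the sext-then-mul lines shown are presumably followed by that shift just outside the excerpt. Scalar equivalent:

    #include <stdint.h>

    int32_t smulwb(int32_t a, int16_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 16);
    }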
str_post.ll |
     7  %tmp1 = trunc i32 %Y to i16   ; <i16> [#uses=1]
     8  store i16 %tmp1, i16* %A
    19  %tmp1 = ptrtoint i32* %A to i32   ; <i32> [#uses=1]
    20  %tmp2 = sub i32 %tmp1, 4   ; <i32> [#uses=1] |
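str_post.ll tests the post-indexed store form: store through the pointer, then advance it in the same instruction, which is roughly what a C post-increment store compiles to; the ptrtoint/sub lines appear to belong to a second function checking the decrementing variant.

    #include <stdint.h>

    void store_post(int16_t **pp, int32_t y)
    {
        *(*pp)++ = (int16_t)y;    /* trunc + store + pointer bump,
                                     e.g. strh r1, [r0], #2 on ARM */
    }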
vsra.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     9  %tmp4 = add <8 x i8> %tmp1, %tmp3
    16  %tmp1 = load <4 x i16>, <4 x i16>* %A
    19  %tmp4 = add <4 x i16> %tmp1, %tmp3
    26  %tmp1 = load <2 x i32>, <2 x i32>* %A
    29  %tmp4 = add <2 x i32> %tmp1, %tmp3
    36  %tmp1 = load <1 x i64>, <1 x i64>* %A
    39  %tmp4 = add <1 x i64> %tmp1, %tmp3
    46  %tmp1 = load <16 x i8>, <16 x i8>* %A
    49  %tmp4 = add <16 x i8> %tmp1, %tmp [all...] |
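Each add combines a loaded vector with a shifted one (the shifts sit on the lines the excerpt skips), the pattern that VSRA folds into a single shift-right-and-accumulate. One signed lane in C:

    #include <stdint.h>

    static int8_t vsra_lane(int8_t acc, int8_t v, int n)
    {
        return (int8_t)(acc + (v >> n));   /* arithmetic shift, then add */
    }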
/bionic/libc/arch-arm64/generic/bionic/ |
strchr.S |
    47  #define tmp1 x3   (define)
    86  ands tmp1, srcin, #31
    95  neg  tmp1, tmp1
   106  lsl  tmp1, tmp1, #1
   110  lsr  tmp1, tmp3, tmp1
   113  bic  tmp1, tmp3, tmp1   // Mask padding bits [all...] |
strcmp.S |
    52  #define tmp1 x7   (define)
    60  eor  tmp1, src1, src2
    62  tst  tmp1, #7
    64  ands tmp1, src1, #7
    73  sub  tmp1, data1, zeroones
    76  bic  has_nul, tmp1, tmp2   /* Non-zero if NUL terminator. */
   112  sub  tmp1, tmp3, zeroones
   114  bic  has_nul, tmp1, tmp2
   137  lsl  tmp1, tmp1, #3   /* Bytes beyond alignment -> bits. * [all...] |
strnlen.S |
    47  #define tmp1 x8   (define)
    75  ands tmp1, srcin, #15
    93  sub  tmp1, data1, zeroones
    97  bic  has_nul1, tmp1, tmp2
   100  orr  tmp1, has_nul1, has_nul2
   101  ccmp tmp1, #0, #0, pl   /* NZCV = 0000 */
   105  orr  tmp1, has_nul1, has_nul2
   106  cbz  tmp1, .Lhit_limit   /* No null in final Qword. */
   126  sub  tmp1, data2, zeroones
   128  bic  has_nul2, tmp1, tmp [all...] |
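The sub/bic pairs in strcmp.S and strnlen.S are the standard word-at-a-time NUL scan: with zeroones = 0x0101010101010101 and 0x7f7f7f7f7f7f7f7f as the companion mask, the expression below is non-zero exactly when some byte of the word is zero, letting these routines test eight (or, with paired loads, sixteen) bytes per iteration. The C shape of the trick:

    #include <stdint.h>

    /* Non-zero iff x contains a zero byte: (x - 0x01..01) borrows into
     * a byte's top bit only when that byte is zero, and & ~x filters
     * out bytes whose top bit was already set. */
    static uint64_t has_zero_byte(uint64_t x)
    {
        const uint64_t ones  = 0x0101010101010101ULL;   /* "zeroones" */
        const uint64_t highs = 0x8080808080808080ULL;
        return (x - ones) & ~x & highs;
    }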
/external/clang/test/CodeGen/ |
tbm-builtins.c |
    51  // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
    53  // CHECK-NEXT: {{.*}} = and i32 [[TMP2]], [[TMP1]]
    58  // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
    60  // CHECK-NEXT: {{.*}} = and i64 [[TMP2]], [[TMP1]]
   101  // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
   103  // CHECK-NEXT: {{.*}} = or i32 [[TMP2]], [[TMP1]]
   108  // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
   110  // CHECK-NEXT: {{.*}} = or i64 [[TMP2]], [[TMP1]]
   115  // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
   117  // CHECK-NEXT: {{.*}} = or i32 [[TMP2]], [[TMP1]] [all...] |
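All of these checks share the ~x produced by the xor with -1; the line defining TMP2 is cut off, so which TBM builtin each block belongs to cannot be read off directly. The and-with-complement shape is the one used by, for example, __blcic, which isolates the lowest clear bit as ~x & (x + 1); as a hypothetical stand-in for the truncated pattern:

    #include <stdint.h>

    /* Lowest-clear-bit isolate, the ~x & (x + 1) shape __blcic tests:
     * 0b0111 -> 0b1000.  (The or-based checks further down pair the
     * same ~x with a different adjustment of x.) */
    static uint32_t blcic_u32(uint32_t x)
    {
        return ~x & (x + 1);
    }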
/external/llvm/test/Bitcode/ |
arm32_neon_vcnt_upgrade.ll |
     7  %tmp1 = load <4 x i16>, <4 x i16>* %A
     8  %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
    15  %tmp1 = load <8 x i8>, <8 x i8>* %A
    16  %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1) |