/external/llvm/test/CodeGen/ARM/ |
vpadd.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <2 x float>, <2 x float>* %A
    35  %tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
    48  %tmp1 = load <8 x i8>, <8 x i8>* %A
    49  %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
    [all...] |
vshl.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <1 x i64>, <1 x i64>* %A
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    42  %tmp1 = load <8 x i8>, <8 x i8>* %A
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2 [all...] |
vicmp.ll |
    13  %tmp1 = load <8 x i8>, <8 x i8>* %A
    15  %tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
    24  %tmp1 = load <4 x i16>, <4 x i16>* %A
    26  %tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
    35  %tmp1 = load <2 x i32>, <2 x i32>* %A
    37  %tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
    46  %tmp1 = load <16 x i8>, <16 x i8>* %A
    48  %tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
    57  %tmp1 = load <8 x i16>, <8 x i16>* %A
    59  %tmp3 = icmp ne <8 x i16> %tmp1, %tmp [all...] |
vst1.ll |
     7  %tmp1 = load <8 x i8>, <8 x i8>* %B
     8  call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
    16  %tmp1 = load <4 x i16>, <4 x i16>* %B
    17  call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
    25  %tmp1 = load <2 x i32>, <2 x i32>* %B
    26  call void @llvm.arm.neon.vst1.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
    34  %tmp1 = load <2 x float>, <2 x float>* %B
    35  call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
    45  %tmp1 = load <2 x float>, <2 x float>* %B
    46  call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1 [all...] |
vadd.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = add <8 x i8> %tmp1, %tmp2
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = add <4 x i16> %tmp1, %tmp2
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = add <2 x i32> %tmp1, %tmp2
    33  %tmp1 = load <1 x i64>, <1 x i64>* %A
    35  %tmp3 = add <1 x i64> %tmp1, %tmp2
    42  %tmp1 = load <2 x float>, <2 x float>* %A
    44  %tmp3 = fadd <2 x float> %tmp1, %tmp [all...] |
vsub.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = sub <8 x i8> %tmp1, %tmp2
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = sub <4 x i16> %tmp1, %tmp2
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = sub <2 x i32> %tmp1, %tmp2
    33  %tmp1 = load <1 x i64>, <1 x i64>* %A
    35  %tmp3 = sub <1 x i64> %tmp1, %tmp2
    42  %tmp1 = load <2 x float>, <2 x float>* %A
    44  %tmp3 = fsub <2 x float> %tmp1, %tmp [all...] |
vrev.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     7  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
    14  %tmp1 = load <4 x i16>, <4 x i16>* %A
    15  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
    22  %tmp1 = load <2 x i32>, <2 x i32>* %A
    23  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
    30  %tmp1 = load <2 x float>, <2 x float>* %A
    31  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
    38  %tmp1 = load <16 x i8>, <16 x i8>* %A
    39  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 (…) [all...] |
div.ll |
    22  %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1]
    23  ret i32 %tmp1
    34  %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1]
    35  ret i32 %tmp1
    49  %tmp1 = srem i32 %a, %b ; <i32> [#uses=1]
    50  ret i32 %tmp1
    64  %tmp1 = urem i32 %a, %b ; <i32> [#uses=1]
    65  ret i32 %tmp1
    80  %tmp1 = srem i64 %a, %b ; <i64> [#uses=1]
    81  ret i64 %tmp1
    [all...] |
fparith.ll |
    55  %tmp1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
    56  ret float %tmp1
    63  %tmp1 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
    64  ret double %tmp1
    71  %tmp1 = fdiv float %a, %b ; <float> [#uses=1]
    72  ret float %tmp1
    79  %tmp1 = fdiv double %a, %b ; <double> [#uses=1]
    80  ret double %tmp1
    87  %tmp1 = call float @fabsf( float %a ) readnone ; <float> [#uses=1]
    88  ret float %tmp1
    [all...] |
vhadd.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <4 x i16>, <4 x i16>* %A
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    24  %tmp1 = load <2 x i32>, <2 x i32>* %A
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33  %tmp1 = load <8 x i8>, <8 x i8>* %A
    35  %tmp3 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    42  %tmp1 = load <4 x i16>, <4 x i16>* %A
    44  %tmp3 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2 [all...] |
vld2.ll |
    18  %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8.p0i8(i8* %A, i32 8)
    19  %tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
    20  %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
    30  %tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16.p0i8(i8* %tmp0, i32 32)
    31  %tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
    32  %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 1
    41  %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32.p0i8(i8* %tmp0, i32 1)
    42  %tmp2 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 0
    43  %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 1
    52  %tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32.p0i8(i8* %tmp0, i32 1 [all...] |
/external/llvm/test/CodeGen/Thumb2/ |
thumb2-uxtb.ll |
    10  %tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
    11  ret i32 %tmp1
    22  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    23  %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    34  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    35  %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    46  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    47  %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    58  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    59  %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1 [all...] |
thumb2-ldr_post.ll |
     4  %tmp1 = mul i32 %a, %b ; <i32> [#uses=2]
     5  %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
     7  %tmp4 = sub i32 %tmp1, 8 ; <i32> [#uses=1] |
/external/webrtc/webrtc/common_audio/signal_processing/ |
resample_by_2_internal.c |
     34  int32_t tmp0, tmp1, diff; local
     46  tmp1 = state[0] + diff * kResampleAllpass[1][0];
     48  diff = tmp1 - state[2];
     54  state[1] = tmp1;
     76  tmp1 = state[4] + diff * kResampleAllpass[0][0];
     78  diff = tmp1 - state[6];
     84  state[5] = tmp1;
    104  tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
    110  if (tmp1 > (int32_t)0x00007FFF)
    111  tmp1 = 0x00007FFF
    129  int32_t tmp0, tmp1, diff; local
    203  int32_t tmp0, tmp1, diff; local
    273  int32_t tmp0, tmp1, diff; local
    343  int32_t tmp0, tmp1, diff; local
    422  int32_t tmp0, tmp1, diff; local
    555  int32_t tmp0, tmp1, diff; local
    [all...] |
/external/boringssl/src/crypto/md5/asm/ |
md5-586.pl |
    20  $tmp1="edi";
    47  &mov($tmp1,$C) if $pos < 0;
    53  &xor($tmp1,$d); # F function - part 2
    55  &and($tmp1,$b); # F function - part 3
    58  &xor($tmp1,$d); # F function - part 4
    60  &add($a,$tmp1);
    61  &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
    62  &mov($tmp1,&Np($c)) if $pos == 1; # next tmp1 for R [all...] |
/external/llvm/test/CodeGen/AArch64/ |
arm64-vcnt.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     7  %tmp3 = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %tmp1)
    14  %tmp1 = load <16 x i8>, <16 x i8>* %A
    15  %tmp3 = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %tmp1)
    22  %tmp1 = load <4 x i16>, <4 x i16>* %A
    23  %tmp3 = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %tmp1)
    30  %tmp1 = load <8 x i16>, <8 x i16>* %A
    31  %tmp3 = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %tmp1)
    38  %tmp1 = load <2 x i32>, <2 x i32>* %A
    39  %tmp3 = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %tmp1)
    [all...] |
arm64-vhadd.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <16 x i8>, <16 x i8>* %A
    17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    24  %tmp1 = load <4 x i16>, <4 x i16>* %A
    26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    33  %tmp1 = load <8 x i16>, <8 x i16>* %A
    35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    42  %tmp1 = load <2 x i32>, <2 x i32>* %A
    44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2 [all...] |
arm64-vmax.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    15  %tmp1 = load <16 x i8>, <16 x i8>* %A
    17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    24  %tmp1 = load <4 x i16>, <4 x i16>* %A
    26  %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    33  %tmp1 = load <8 x i16>, <8 x i16>* %A
    35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    42  %tmp1 = load <2 x i32>, <2 x i32>* %A
    44  %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2 [all...] |
neon-bitwise-instructions.ll |
     6  %tmp1 = and <8 x i8> %a, %b;
     7  ret <8 x i8> %tmp1
    13  %tmp1 = and <16 x i8> %a, %b;
    14  ret <16 x i8> %tmp1
    21  %tmp1 = or <8 x i8> %a, %b;
    22  ret <8 x i8> %tmp1
    28  %tmp1 = or <16 x i8> %a, %b;
    29  ret <16 x i8> %tmp1
    36  %tmp1 = xor <8 x i8> %a, %b;
    37  ret <8 x i8> %tmp1
    [all...] |
arm64-ext.ll |
     6  %tmp1 = load <8 x i8>, <8 x i8>* %A
     8  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
    15  %tmp1 = load <8 x i8>, <8 x i8>* %A
    17  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
    24  %tmp1 = load <16 x i8>, <16 x i8>* %A
    26  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
    33  %tmp1 = load <16 x i8>, <16 x i8>* %A
    35  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
    42  %tmp1 = load <4 x i16>, <4 x i16>* %A
    44  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6 [all...] |
arm64-unaligned_ldst.ll |
    10  %tmp1 = bitcast i8* %b to i64*
    12  %tmp3 = load i64, i64* %tmp1, align 1
    23  %tmp1 = bitcast i8* %b to i32*
    25  %tmp3 = load i32, i32* %tmp1, align 1
    36  %tmp1 = bitcast i8* %b to i16*
    38  %tmp3 = load i16, i16* %tmp1, align 1 |
/external/llvm/test/Transforms/ArgumentPromotion/ |
2008-02-01-ReturnAttrs.ll |
    14  ; CHECK: %tmp1 = call i32 @deref(i32 %x_addr.val) [[NUW:#[0-9]+]]
    15  %tmp1 = call i32 @deref( i32* %x_addr ) nounwind
    16  ret i32 %tmp1 |
/external/llvm/test/Transforms/GVN/ |
2008-02-12-UndefLoad.ll |
    10  %tmp1 = getelementptr i32, i32* %tmp, i32 1 ; <i32*> [#uses=2]
    11  %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
    14  store i32 %tmp4, i32* %tmp1, align 4 |
/prebuilts/go/darwin-x86/test/bench/shootout/ |
pidigits.c |
    41  static mpz_t numer, accum, denom, tmp1, tmp2; variable
    49  mpz_mul_2exp(tmp1, numer, 1);
    50  mpz_add(tmp1, tmp1, numer);
    51  mpz_add(tmp1, tmp1, accum);
    52  mpz_fdiv_qr(tmp1, tmp2, tmp1, denom);
    61  return mpz_get_ui(tmp1);
    68  mpz_mul_2exp(tmp1, numer, 1) [all...] |
/prebuilts/go/linux-x86/test/bench/shootout/ |
pidigits.c |
    41  static mpz_t numer, accum, denom, tmp1, tmp2; variable
    49  mpz_mul_2exp(tmp1, numer, 1);
    50  mpz_add(tmp1, tmp1, numer);
    51  mpz_add(tmp1, tmp1, accum);
    52  mpz_fdiv_qr(tmp1, tmp2, tmp1, denom);
    61  return mpz_get_ui(tmp1);
    68  mpz_mul_2exp(tmp1, numer, 1) [all...] |