/external/llvm/test/Transforms/InstCombine/ |
2007-05-14-Crash.ll | 12 %tmp1 = bitcast %struct.anon* %deviceRef to %struct.def* 13 %tmp3 = getelementptr %struct.def, %struct.def* %tmp1, i32 0, i32 1
|
2007-10-10-EliminateMemCpy.ll | 12 %tmp1 = getelementptr [4 x i8], [4 x i8]* @.str, i32 0, i32 0 13 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %tmp1, i32 4, i32 1, i1 false)
|
2007-10-31-StringCrash.ll | 9 %tmp1 = bitcast void ()* @__darwin_gcc3_preregister_frame_info to i32* ; <i32*> [#uses=1] 10 %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
|
2008-04-22-ByValBitcast.ll | 10 %tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1] 11 call void (i32, ...) @bar( i32 3, %struct.NSRect* byval align 4 %tmp1 ) nounwind
|
pr24354.ll | 14 %tmp1 = icmp eq i32 %tmp, 0 15 br i1 %tmp1, label %bb2, label %bb6
|
/external/llvm/test/Transforms/MemCpyOpt/ |
2008-03-13-ReturnSlotBitcast.ll | 17 %tmp1 = getelementptr %b, %b* %b_var, i32 0, i32 0 18 %tmp2 = load float, float* %tmp1
|
/external/llvm/test/Transforms/SCCP/ |
ipsccp-addr-taken.ll | 18 %tmp1 = icmp ne i32 %c, 0 ; <i1> [#uses=1] 19 %tmp2 = select i1 %tmp1, i32 ()* @foo, i32 ()* @bar ; <i32 ()*> [#uses=1]
|
/external/ltrace/testsuite/ltrace.minor/ |
count-record.c | 42 rename ("system_calls.tmp", "system_calls.tmp1"); 45 remove("system_calls.tmp1");
|
/external/llvm/test/CodeGen/ARM/ |
vld-vst-upgrade.ll | 12 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %ptr, i32 1) 13 ret <2 x i32> %tmp1 21 %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8* %ptr, i32 1) 22 ret %struct.__neon_int32x2x2_t %tmp1 30 %tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8* %ptr, i32 1) 31 ret %struct.__neon_int32x2x3_t %tmp1 39 %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %ptr, i32 1) 40 ret %struct.__neon_int32x2x4_t %tmp1 50 %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %ptr, <2 x i32> %A, <2 x i32> %B, i32 1, i32 1) 51 ret %struct.__neon_int32x2x2_t %tmp1 [all...] |
select_xform.ll | 14 %tmp1 = icmp sgt i32 %c, 10 15 %tmp2 = select i1 %tmp1, i32 0, i32 2147483647 28 %tmp1 = icmp sgt i32 %c, 10 29 %tmp2 = select i1 %tmp1, i32 0, i32 10 71 %tmp1 = icmp eq i32 %a, %b 72 %tmp2 = zext i1 %tmp1 to i32 86 %tmp1 = select i1 %cond, i32 %c, i32 0 87 %tmp2 = xor i32 %tmp1, %d 100 %tmp1 = shl i32 %c, 1 102 %tmp2 = select i1 %cond, i32 %tmp1, i32 - [all...] |
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/vc/m4p2/src/ |
omxVCM4P2_MCReconBlock_s.s | 361 M_LOAD_X pSrc, srcStep, tmp1, tmp2, tmp3, $offset 365 STRD tmp1, tmp2, [pDst], dstStep 367 M_LOAD_X pSrc, srcStep, tmp1, tmp2, tmp3, $offset 396 M_LOAD_XINT pSrc, srcStep, $offset, tmp1, tmp2, tmp3, tmp4 399 M_EXT_XINT $offset, tmp1, tmp2, tmp3, tmp4 400 M_UHADD8R tmp5, tmp1, tmp3, (1-$rndVal), mask 403 M_LOAD_XINT pSrc, srcStep, $offset, tmp1, tmp2, tmp3, tmp4 427 M_LOAD_X pSrc, srcStep, tmp1, tmp2, tmp5, $offset ;// Pre-load 432 M_UHADD8R tmp1, tmp1, tmp3, (1-$rndVal), mas 598 tmp1 RN 4 [all...] |
/external/libvpx/libvpx/vpx_dsp/mips/ |
vpx_convolve_msa.h | 21 v8i16 tmp0, tmp1; \ 25 tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \ 26 tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3); \ 27 tmp0 = __msa_adds_s_h(tmp0, tmp1); \
|
sub_pixel_variance_msa.c | 596 v8u16 tmp0, tmp1; 614 DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); 615 SRARI_H2_UH(tmp0, tmp1, FILTER_BITS); 616 out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); 639 v8u16 tmp0, tmp1, tmp2, tmp3; 660 tmp0, tmp1, tmp2, tmp3); 661 SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); 662 PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1); 687 v8u16 tmp0, tmp1, tmp2, tmp3; 706 DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); 797 v8u16 tmp0, tmp1; 849 v8u16 tmp0, tmp1, tmp2, tmp3; 910 v8u16 tmp0, tmp1; 1137 v16u8 tmp0, tmp1, tmp2, tmp3; 1262 v8u16 tmp0, tmp1; 1311 v8u16 tmp0, tmp1, tmp2, tmp3; 1364 v8u16 tmp0, tmp1, tmp2, tmp3; 1494 v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1; 1547 v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; 1619 v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1; [all...] |
/external/llvm/test/CodeGen/AMDGPU/ |
si-triv-disjoint-mem-access.ll | 22 %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4 26 %add = add nsw i32 %tmp1, %tmp2 42 %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4 46 %add = add nsw i32 %tmp1, %tmp2 62 %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4 67 %add = add nsw i32 %tmp1, %tmp2 87 %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4 91 %add = add nsw i32 %tmp1, %tmp2 108 %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4 112 %add = add nsw i32 %tmp1, %tmp [all...] |
/frameworks/av/media/libstagefright/colorconversion/ |
ColorConverter.cpp | 173 signed tmp1 = y1 * 298; 174 signed b1 = (tmp1 + u_b) / 256; 175 signed g1 = (tmp1 + v_g + u_g) / 256; 176 signed r1 = (tmp1 + v_r) / 256; 289 signed tmp1 = y1 * 298; 290 signed b1 = (tmp1 + u_b) / 256; 291 signed g1 = (tmp1 + v_g + u_g) / 256; 292 signed r1 = (tmp1 + v_r) / 256; 362 signed tmp1 = y1 * 298; 363 signed b1 = (tmp1 + u_b) / 256 436 signed tmp1 = y1 * 298; 506 signed tmp1 = y1 * 298; [all...] |
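
Note: the ColorConverter.cpp hits above all share the same fixed-point YUV-to-RGB step: a common luma term tmp1 = y1 * 298 is combined with precomputed chroma terms and scaled back with / 256. A minimal standalone sketch of that pattern follows; the chroma weights, bias and clamping are assumptions filled in for illustration and do not appear in the hits themselves.

    #include <stdint.h>

    /* Sketch of the fixed-point YUV->RGB pattern from the ColorConverter.cpp
     * hits: tmp1 = y1 * 298 is shared across R/G/B, chroma contributions are
     * precomputed, and /256 scales the result back down. */
    static uint8_t clamp8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    static void yuv_to_rgb_sketch(int y, int u, int v,
                                  uint8_t *r, uint8_t *g, uint8_t *b) {
        int y1  = y - 16;               /* assumed video-range bias */
        int u_b = 516 * (u - 128);      /* assumed BT.601-style chroma weights; */
        int u_g = -100 * (u - 128);     /* only 298 and /256 appear in the hits */
        int v_g = -208 * (v - 128);
        int v_r = 409 * (v - 128);

        int tmp1 = y1 * 298;            /* shared luma term, as in the snippet */
        *b = clamp8((tmp1 + u_b) / 256);
        *g = clamp8((tmp1 + v_g + u_g) / 256);
        *r = clamp8((tmp1 + v_r) / 256);
    }
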
/toolchain/binutils/binutils-2.25/cpu/ |
xc16x.cpu | [all...] |
/external/llvm/test/CodeGen/AArch64/ |
arm64-vmul.ll | 7 %tmp1 = load <8 x i8>, <8 x i8>* %A 9 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) 16 %tmp1 = load <4 x i16>, <4 x i16>* %A 18 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) 25 %tmp1 = load <2 x i32>, <2 x i32>* %A 27 %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) 38 %tmp1 = load <8 x i8>, <8 x i8>* %A 40 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) 47 %tmp1 = load <4 x i16>, <4 x i16>* %A 49 %tmp3 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2 [all...] |
/external/aac/libFDK/src/arm/ |
scale_arm.cpp | 117 FIXP_DBL tmp1 = mySpec[1]; 121 tmp1 = fMultDiv2(tmp1, factor); 125 tmp1 <<= shift; 129 *mySpec++ = tmp1; 147 FIXP_DBL tmp1 = mySpec[1]; 151 tmp1 = fMultDiv2(tmp1, factor); 155 tmp1 >>= shift; 159 *mySpec++ = tmp1; [all...] |
/external/webrtc/webrtc/common_audio/signal_processing/ |
resample_fractional.c | 151 int32_t tmp1 = 16384; 156 tmp1 += coef * in1[0]; 160 tmp1 += coef * in1[1]; 164 tmp1 += coef * in1[2]; 168 tmp1 += coef * in1[3]; 172 tmp1 += coef * in1[4]; 176 tmp1 += coef * in1[5]; 180 tmp1 += coef * in1[6]; 184 tmp1 += coef * in1[7]; 188 *out1 = tmp1 + coef * in1[8] [all...] |
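
Note: the resample_fractional.c hits show a fixed-point FIR tap sum seeded with a rounding constant (16384 = 1 << 14) before the coefficient products are accumulated; the hit is truncated before the output scaling. A small sketch of that accumulation pattern, with the tap count, Q15 shift and saturation assumed for illustration:

    #include <stdint.h>

    /* Sketch of the accumulation pattern from the resample_fractional.c hits:
     * seed with a rounding offset, add coef * in products, then scale back.
     * The final >>15 and the saturation are assumptions (the hit is cut off). */
    static int16_t fir_tap_sum_q15(const int16_t *in, const int16_t *coef, int taps) {
        int32_t acc = 16384;                  /* rounding offset, as in the snippet */
        for (int k = 0; k < taps; ++k)
            acc += (int32_t)coef[k] * in[k];  /* coef * in1[k] accumulation */
        acc >>= 15;                           /* assumed Q15 scaling */
        if (acc > 32767) acc = 32767;         /* assumed saturation to int16_t */
        if (acc < -32768) acc = -32768;
        return (int16_t)acc;
    }
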
/toolchain/binutils/binutils-2.25/bfd/ |
elf32-rl78.c | 802 int32_t tmp1, tmp2; 805 RL78_STACK_POP (tmp1); 806 tmp1 += tmp2; 807 RL78_STACK_PUSH (tmp1); 813 int32_t tmp1, tmp2; 819 RL78_STACK_POP (tmp1); /* A */ 820 tmp1 -= tmp2; /* A - B */ 821 RL78_STACK_PUSH (tmp1); 827 int32_t tmp1, tmp2; 830 RL78_STACK_POP (tmp1); 800 int32_t tmp1, tmp2; 811 int32_t tmp1, tmp2; 825 int32_t tmp1, tmp2; 836 int32_t tmp1, tmp2; 847 int32_t tmp1, tmp2; 858 int32_t tmp1, tmp2; 877 int32_t tmp1, tmp2; 888 int32_t tmp1, tmp2; 899 int32_t tmp1, tmp2; 920 int32_t tmp1, tmp2; 1674 int32_t tmp1, tmp2; [all...] |
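
Note: the elf32-rl78.c hits all follow one pattern: a composed relocation is evaluated on a small value stack, where a binary operator pops its two int32_t operands and pushes the result (for subtraction, B is popped before A, per the comments in the hit). A self-contained sketch of that operand-stack pattern, with names and the stack representation invented for illustration:

    #include <stdint.h>

    /* Sketch of the pop/compute/push pattern from the elf32-rl78.c hits.
     * The fixed-size array stack and helper names are illustrative only. */
    #define STACK_MAX 16
    static int32_t value_stack[STACK_MAX];
    static int stack_top;

    static void push_val(int32_t v) { if (stack_top < STACK_MAX) value_stack[stack_top++] = v; }
    static int32_t pop_val(void)    { return stack_top > 0 ? value_stack[--stack_top] : 0; }

    static void eval_add(void) {     /* mirrors: POP; POP; tmp1 += tmp2; PUSH (tmp1) */
        int32_t tmp2 = pop_val();
        int32_t tmp1 = pop_val();
        push_val(tmp1 + tmp2);
    }

    static void eval_sub(void) {
        int32_t tmp2 = pop_val();    /* B (popped first, as in the hit) */
        int32_t tmp1 = pop_val();    /* A */
        push_val(tmp1 - tmp2);       /* A - B */
    }
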
/external/llvm/test/Transforms/SROA/ |
vector-promotion.ll | 18 %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2 19 %tmp1 = load i32, i32* %a.tmp1 29 %tmp4 = add i32 %tmp1, %tmp2 49 %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2 50 %tmp1 = load i32, i32* %a.tmp1 63 %tmp4 = add i32 %tmp1, %tmp2 87 %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2 88 %a.tmp1.cast = bitcast i32* %a.tmp1 to i8 [all...] |
/external/opencv3/3rdparty/libjpeg/ |
jidctint.c | 175 INT32 tmp0, tmp1, tmp2, tmp3; 244 tmp1 = z2 - z3; 248 tmp11 = tmp1 + tmp3; 249 tmp12 = tmp1 - tmp3; 256 tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); 261 z3 = tmp1 + tmp3; 275 z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ 276 tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ 278 tmp1 += z1 + z3 432 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13; 581 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; 702 INT32 tmp0, tmp1, tmp10, tmp11, tmp12; 1011 INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; 1093 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14; 2565 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; 2826 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; 3629 INT32 tmp0, tmp1, tmp2, tmp3; 3782 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; 4024 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; 4741 INT32 tmp0, tmp1, tmp2, tmp3; 4921 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; [all...] |
/frameworks/rs/java/tests/VrDemo/src/com/example/android/rs/vr/engine/ |
bugdroid.rs | 71 float tmp1 = dot(normal, pc); 72 tmp1 = tmp1 * tmp1; 73 float tmp2 = (sqrt(length(pc) * length(pc) - tmp1) - radius); 74 return tmp1 + tmp2 * tmp2; 86 float tmp1 = dot(normal, pc); 87 tmp1 = tmp1 * tmp1; [all...] |
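
Note: the bugdroid.rs hit reads as the squared distance from an offset vector pc to a circle of the given radius lying in the plane through the origin with unit normal `normal`: the axial component dot(normal, pc) is squared and added to the squared gap between the in-plane distance and the radius. A scalar restatement in C, assuming `normal` is unit length (the surrounding RenderScript context is not reproduced):

    #include <math.h>

    /* Sketch restating the bugdroid.rs hit: squared distance from the point
     * offset pc to a ring of radius `radius` in the plane with unit normal. */
    static float ring_dist_sq(const float pc[3], const float normal[3], float radius) {
        float axial = pc[0]*normal[0] + pc[1]*normal[1] + pc[2]*normal[2]; /* dot(normal, pc) */
        float tmp1  = axial * axial;                                       /* squared axial part */
        float len2  = pc[0]*pc[0] + pc[1]*pc[1] + pc[2]*pc[2];             /* length(pc)^2 */
        float tmp2  = sqrtf(len2 - tmp1) - radius;                         /* in-plane gap to the ring */
        return tmp1 + tmp2 * tmp2;
    }
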
/external/libjpeg-turbo/ |
jidctint.c | 176 INT32 tmp0, tmp1, tmp2, tmp3; 242 tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); 246 tmp11 = tmp1 + tmp2; 247 tmp12 = tmp1 - tmp2; 254 tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); 259 z2 = tmp1 + tmp2; 261 z4 = tmp1 + tmp3; 265 tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ 277 tmp1 += z2 + z4 430 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13; 579 INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; 700 INT32 tmp0, tmp1, tmp10, tmp11, tmp12; 905 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14; 2377 INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; [all...] |
/external/llvm/test/CodeGen/X86/ |
emutls.ll | 57 %tmp1 = load i32, i32* @i1 58 ret i32 %tmp1 86 %tmp1 = load i32, i32* @i2 87 ret i32 %tmp1 110 %tmp1 = load i32, i32* @i3 111 ret i32 %tmp1 134 %tmp1 = load i32, i32* @i4 135 ret i32 %tmp1 158 %tmp1 = load i32, i32* @i5 159 ret i32 %tmp1 [all...] |