/external/llvm/test/CodeGen/PowerPC/

vcmp-fold.ll
  11:  %tmp4 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
  13:  %tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]

vec_veqv_vnand_vorc.ll
  26:  %tmp4 = or <4 x i32> %tmp3, %x
  28:  ret <4 x i32> %tmp4
/external/llvm/test/CodeGen/X86/

2006-05-02-InstrSched1.ll
  18:  %tmp4 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
  22:  %tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]

2006-05-08-InstrSched.ll
  13:  %tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
  15:  %tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]

2007-11-06-InstrSched.ll
  12:  %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
  13:  %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]

2010-04-08-CoalescerBug.ll
  19:  %tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
  21:  %tmp6 = bitcast i32* %tmp4 to i8*

avoid-loop-align.ll
  33:  %tmp4 = sub i32 %tmp3, %tmp ; <i32> [#uses=1]
  34:  %tmp5 = getelementptr [100 x i32], [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]

byval2.ll
  38:  %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
  39:  store i64 %c, i64* %tmp4, align 16

call-push.ll
  15:  %tmp4 = load i32, i32* %tmp23 ; <i32> [#uses=1]
  16:  %tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]

codegen-prepare-cast.ll
  21:  %tmp4 = getelementptr i8, i8* %tmp, i32 undef ; <i8*> [#uses=1]
  22:  %tmp5 = load i8, i8* %tmp4 ; <i8> [#uses=0]

div8.ll
  20:  %tmp4 = load i8, i8* %quotient, align 1
  21:  ret i8 %tmp4

stride-nine-with-base-reg.ll
  22:  %tmp4 = mul i8 %tmp3, 2
  24:  store i8 %tmp4, i8* %tmp5, align 4

stride-reuse.ll
  18:  %tmp4 = fmul float %tmp3, 2.000000e+00
  20:  store float %tmp4, float* %tmp5, align 4

tailcallbyval64.ll
  40:  %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
  41:  ret i64 %tmp4

use-add-flags.ll
  17:  %tmp4 = add i32 %tmp2, %y ; <i32> [#uses=1]
  18:  %tmp5 = icmp slt i32 %tmp4, 0 ; <i1> [#uses=1]

vec_shift3.ll
  20:  %tmp4 = tail call <8 x i16> @llvm.x86.sse2.psrai.w( <8 x i16> %tmp2, i32 %bits ) nounwind readnone ; <<8 x i16>> [#uses=1]
  21:  %tmp5 = bitcast <8 x i16> %tmp4 to <2 x i64> ; <<2 x i64>> [#uses=1]
/external/llvm/test/Feature/

aliases.ll
  38:  %tmp4 = call %FunTy @bar_f()
  39:  %tmp5 = add i32 %tmp3, %tmp4

ppcld.ll
  19:  %tmp4 = fpext double %tmp3 to ppc_fp128 ; <ppc_fp128> [#uses=1]
  20:  store ppc_fp128 %tmp4, ppc_fp128* @ld

x86ld.ll
  19:  %tmp4 = fpext double %tmp3 to x86_fp80 ; <x86_fp80> [#uses=1]
  20:  store x86_fp80 %tmp4, x86_fp80* @ld
/external/llvm/test/Transforms/ArgumentPromotion/

byval.ll
  23:  %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
  24:  store i64 2, i64* %tmp4, align 4
/external/llvm/test/Transforms/IndVarSimplify/

loop_evaluate11.ll
  25:  %tmp4 = add i32 %order_start.0, 2 ; <i32> [#uses=1]
  26:  %tmp5 = add i32 %tmp4, undef ; <i32> [#uses=1]
/external/llvm/test/Transforms/InstCombine/

2008-04-29-VolatileLoadDontMerge.ll
  15:  %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
  16:  store volatile i32 %tmp4, i32* @g_1, align 4

2008-07-08-VolatileLoadMerge.ll
  16:  %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
  17:  store volatile i32 %tmp4, i32* @g_1, align 4

2010-11-01-lshr-mask.ll
  32:  %tmp4 = and i8 %arg1, 33
  35:  %tmp7 = or i8 %tmp4, %tmp6

neon-intrinsics.ll
  18:  %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
  20:  call void @llvm.arm.neon.vst4.p0i8.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)