/external/llvm/test/CodeGen/X86/
2006-05-02-InstrSched1.ll
  15: %tmp4 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
  19: %tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
2006-05-08-InstrSched.ll
  13: %tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
  15: %tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]
2007-11-06-InstrSched.ll
  12: %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
  13: %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
2010-04-08-CoalescerBug.ll
  19: %tmp4 = getelementptr inbounds %struct.FC* %tmp3, i64 0, i32 1, i64 0
  21: %tmp6 = bitcast i32* %tmp4 to i8*
avoid-loop-align.ll
  33: %tmp4 = sub i32 %tmp3, %tmp ; <i32> [#uses=1]
  34: %tmp5 = getelementptr [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
byval2.ll
  38: %tmp4 = getelementptr %struct.s* %d, i32 0, i32 2
  39: store i64 %c, i64* %tmp4, align 16
call-push.ll
  15: %tmp4 = load i32* %tmp23 ; <i32> [#uses=1]
  16: %tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
stride-nine-with-base-reg.ll
  22: %tmp4 = mul i8 %tmp3, 2
  24: store i8 %tmp4, i8* %tmp5, align 4
stride-reuse.ll
  18: %tmp4 = fmul float %tmp3, 2.000000e+00
  20: store float %tmp4, float* %tmp5, align 4
tailcallbyval64.ll
  40: %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
  41: ret i64 %tmp4
use-add-flags.ll
  17: %tmp4 = add i32 %tmp2, %y ; <i32> [#uses=1]
  18: %tmp5 = icmp slt i32 %tmp4, 0 ; <i1> [#uses=1]
vec_shift3.ll
  20: %tmp4 = tail call <8 x i16> @llvm.x86.sse2.psrai.w( <8 x i16> %tmp2, i32 %bits ) nounwind readnone ; <<8 x i16>> [#uses=1]
  21: %tmp5 = bitcast <8 x i16> %tmp4 to <2 x i64> ; <<2 x i64>> [#uses=1]
/external/llvm/test/Feature/ |
ppcld.ll
  19: %tmp4 = fpext double %tmp3 to ppc_fp128 ; <ppc_fp128> [#uses=1]
  20: store ppc_fp128 %tmp4, ppc_fp128* @ld
x86ld.ll
  19: %tmp4 = fpext double %tmp3 to x86_fp80 ; <x86_fp80> [#uses=1]
  20: store x86_fp80 %tmp4, x86_fp80* @ld
/external/llvm/test/Transforms/ArgumentPromotion/ |
byval.ll
  21: %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
  22: store i64 2, i64* %tmp4, align 4
/external/llvm/test/Transforms/IndVarSimplify/ |
loop_evaluate11.ll
  25: %tmp4 = add i32 %order_start.0, 2 ; <i32> [#uses=1]
  26: %tmp5 = add i32 %tmp4, undef ; <i32> [#uses=1]
/external/llvm/test/Transforms/InstCombine/ |
2008-04-29-VolatileLoadDontMerge.ll
  15: %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
  16: store volatile i32 %tmp4, i32* @g_1, align 4
2008-07-08-VolatileLoadMerge.ll
  16: %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
  17: store volatile i32 %tmp4, i32* @g_1, align 4
2010-11-01-lshr-mask.ll
  32: %tmp4 = and i8 %arg1, 33
  35: %tmp7 = or i8 %tmp4, %tmp6
bitcast.ll
  29: %tmp4 = bitcast i32 %tmp2 to float ; <float> [#uses=1]
  31: %add = fadd float %tmp24, %tmp4
  37: ; CHECK-NEXT: %tmp4 = extractelement <2 x float> {{.*}}, i32 0
  38: ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
  54: %tmp4 = bitcast i32 %tmp2 to float
  56: %add = fadd float %tmp24, %tmp4
  62: ; CHECK-NEXT: %tmp4 = extractelement <4 x float> {{.*}}, i32 2
  63: ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
/external/llvm/test/Transforms/LoopUnroll/ |
shifted-tripcount.ll
  19: %tmp4 = load double* %arrayidx ; <double> [#uses=1]
  21: %mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
/external/llvm/test/Transforms/LoopVectorize/X86/ |
reduction-crash.ll
  21: %tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
  26: %tmp9 = fadd fast double %tmp4, undef
/external/llvm/test/Transforms/LoopVectorize/ |
phi-hang.ll
  39: %tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
  40: %tmp4 = or i32 %tmp2, %tmp3
/external/llvm/test/Transforms/ObjCARC/ |
contract-storestrong-ivar.ll
  25: %tmp4 = tail call i8* @objc_retain(i8* %tmp3) nounwind
  28: %tmp6 = bitcast i8* %tmp4 to %1*
/external/llvm/test/Transforms/ScalarRepl/ |
load-store-aggregate.ll
  17: %tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
  18: %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]