    Searched full:tmp9 (Results 1 - 25 of 199) sorted by null


  /external/llvm/test/Transforms/InstCombine/
2007-05-10-icmp-or.ll 2 define i1 @test(i32 %tmp9) {
3 %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
4 %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
2007-06-06-AshrSignBit.ll 4 define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
17 store i32 %tmp33, i32* %tmp9
18 %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
  /external/llvm/test/CodeGen/X86/
vec_add.ll 5 %tmp9 = add <2 x i64> %b, %a ; <<2 x i64>> [#uses=1]
6 ret <2 x i64> %tmp9
lea-recursion.ll 18 %tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
19 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
22 %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
23 %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
26 %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
27 %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
30 %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
31 %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
34 %tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
35 %tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
vec_shuffle-31.ll 6 %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef , i32 undef >
7 ret <8 x i16> %tmp9
2006-05-08-CoalesceSubRegClass.ll 20 %tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
21 %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
22 store i16* %tmp9.upgrd.3, i16** @C
mmx-punpckhdq.ll 10 %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ; <<2 x i32>> [#uses=1]
11 %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> ; <<1 x i64>> [#uses=1]
24 %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
25 store x86_mmx %tmp9, x86_mmx* %x
vec_shift.ll 9 %tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind readnone ; <<8 x i16>> [#uses=1]
10 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
19 %tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 ) ; <<8 x i16>> [#uses=1]
20 %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
28 %tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone ; <<2 x i64>> [#uses=1]
29 ret <2 x i64> %tmp9
2011-07-13-BadFrameIndexDisplacement.ll 15 %tmp9 = load i8* %tmp8, align 1
16 %tmp10 = sext i8 %tmp9 to i32
sse-align-11.ll 10 %tmp9 = insertelement <4 x float> %tmp8, float %d, i32 3
11 ret <4 x float> %tmp9
vec_insert-7.ll 13 %tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx
14 ret x86_mmx %tmp9
vec_shuffle-17.ll 12 %tmp9 = insertelement <2 x double> %tmp8, double 0.000000e+00, i32 1 ; <<2 x double>> [#uses=1]
13 %tmp11 = bitcast <2 x double> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
vec_shuffle-36.ll 8 %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 3, i32 2, i32 0, i32 2, i32 1, i32 5, i32 6 , i32 undef >
9 ret <8 x i16> %tmp9
2009-06-15-not-a-tail-call.ll 10 %tmp9 = tail call i8* @memset(i8* %tmp6, i32 0, i64 2) ; <i8*> [#uses=0]
coalescer-commute2.ll 16 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
17 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
25 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
26 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
2007-10-16-CoalescerCrash.ll 8 %tmp9 = sub i64 32, %b ; <i64> [#uses=2]
9 %tmp11 = icmp slt i64 %tmp9, 1 ; <i1> [#uses=1]
11 %tmp2223 = trunc i64 %tmp9 to i32 ; <i32> [#uses=2]
2009-11-16-MachineLICM.ll 20 %tmp9 = shl i64 %indvar, 2 ; <i64> [#uses=4]
21 %tmp1016 = or i64 %tmp9, 1 ; <i64> [#uses=1]
23 %tmp1117 = or i64 %tmp9, 2 ; <i64> [#uses=1]
25 %tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
27 %x_addr.03 = getelementptr float* %x, i64 %tmp9 ; <float*> [#uses=1]
  /external/llvm/test/CodeGen/PowerPC/
fneg.ll 8 %tmp9 = fsub double %tmp7, %tmp4 ; <double> [#uses=1]
9 ret double %tmp9
  /external/llvm/test/Analysis/ScalarEvolution/
2007-08-06-MisinterpretBranch.ll 12 %tmp9 = icmp slt i32 %tmp6, %y ; <i1> [#uses=1]
13 br i1 %tmp9, label %bb, label %return
  /external/llvm/test/Transforms/SCCP/
2008-01-27-UndefCorrelate.ll 21 %tmp9 = icmp slt i32 %k, 10
22 br i1 %tmp9, label %bb.backedge, label %bb12
  /external/llvm/test/CodeGen/ARM/
pr13249.ll 15 %tmp9 = phi i8* [ %tmp12, %bb13 ], [ %tmp, %bb3 ]
17 %tmp12 = getelementptr inbounds i8* %tmp9, i32 1
25 store i8* %tmp9, i8** %arg2, align 4
ifcvt7.ll 15 %tmp9 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=2]
19 %tmp23 = icmp eq %struct.quad_struct* %tmp9, null ; <i1> [#uses=1]
27 %tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 ) ; <i32> [#uses=0]
  /external/llvm/test/Transforms/TailCallElim/
dont_reorder_load.ll 24 %tmp9 = load i32* @extern_weak_global ; <i32> [#uses=1]
25 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
43 %tmp9 = load i32* %a_arg ; <i32> [#uses=1]
44 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
61 %tmp9 = load volatile i32* %a_arg ; <i32> [#uses=1]
62 %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
  /external/llvm/test/Transforms/LoopStrengthReduce/
2013-01-05-IndBr.ll 19 %tmp9 = phi i8 [ %tmp12, %bb11 ], [ 25, %bb190 ]
24 %tmp12 = add i8 %tmp9, 1
25 %tmp13 = add i8 %tmp9, -19
  /external/llvm/test/Transforms/LoopVectorize/X86/
reduction-crash.ll 21 %tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
26 %tmp9 = fadd fast double %tmp4, undef
33 %tmp13 = phi double [ %tmp, %bb2 ], [ %tmp9, %bb3 ]

