    Searched full:tmp4 (Results 276 - 300 of 1043)


  /external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
ldstrexd.ll 24 %tmp4 = trunc i64 %val to i32
27 %strexd = tail call i32 @llvm.arm.strexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
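The ldstrexd.ll lines above show how a 64-bit store-exclusive is expressed in IR: the i64 is split into two i32 halves (trunc for the low word, lshr-then-trunc for the high word that becomes %tmp7 in the call) before being handed to the intrinsic. A minimal self-contained sketch of that shape, assuming the typed-pointer intrinsic signature this LLVM tree uses; the function name is illustrative:

    declare i32 @llvm.arm.strexd(i32, i32, i8*)

    define i32 @store64_exclusive(i64 %val, i8* %ptr) {
      %lo = trunc i64 %val to i32              ; low 32 bits (the %tmp4 above)
      %hi64 = lshr i64 %val, 32
      %hi = trunc i64 %hi64 to i32             ; high 32 bits
      %strexd = tail call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %ptr)
      ret i32 %strexd                          ; 0 if the exclusive store succeeded
    }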
load.ll 32 %tmp4 = zext i16 %tmp to i32 ; <i32> [#uses=1]
33 ret i32 %tmp4
  /external/swiftshader/third_party/LLVM/test/CodeGen/CPP/
2009-05-04-CondBr.ll 24 %tmp4 = load i32* %a.addr ; <i32> [#uses=1]
25 store i32 %tmp4, i32* %retval
  /external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/
2007-01-15-AsmDialect.ll 15 %tmp4 = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"( i32 %tmp3 ) ; <i32> [#uses=1]
16 store i32 %tmp4, i32* %ctz_c
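The 2007-01-15-AsmDialect.ll match exercises LLVM's asm-variant syntax: inside an inline-asm string, $( a $| b $) supplies one spelling per assembler dialect, so the same call site can emit either cntlz or cntlzw. A standalone sketch reusing the exact asm and constraint strings from the match; the function name is illustrative:

    define i32 @leading_zeros(i32 %x) {
      ; picks "cntlz" or "cntlzw" depending on the assembler variant in use
      %n = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %x)
      ret i32 %n
    }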
and_sext.ll 16 %tmp4 = ashr i32 %tmp2, 1
17 %tmp5 = trunc i32 %tmp4 to i16
eqv-andc-orc-nor.ll 77 %tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
78 %tmp4.upgrd.3 = bitcast <4 x i32> %tmp4 to <4 x float> ; <<4 x float>> [#uses=1]
79 store <4 x float> %tmp4.upgrd.3, <4 x float>* %P
88 %tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
89 %tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4 ; <<4 x i32>> [#uses=1]
90 %tmp4.upgrd.6 = bitcast <4 x i32> %tmp3 to <4 x float> ; <<4 x float>> [#uses=1]
91 store <4 x float> %tmp4.upgrd.6, <4 x float>* %P
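In the eqv-andc-orc-nor.ll lines, xor against an all-ones splat is the IR idiom for vector NOT; the test combines it with and/or/xor in the shapes the PowerPC backend is expected to match into its andc, orc, eqv, and nor instructions. A minimal sketch of the andc shape, with illustrative value names:

    define <4 x i32> @and_with_complement(<4 x i32> %a, <4 x i32> %b) {
      %not.b = xor <4 x i32> %b, < i32 -1, i32 -1, i32 -1, i32 -1 >  ; vector NOT
      %r = and <4 x i32> %a, %not.b                                  ; andc shape: a & ~b
      ret <4 x i32> %r
    }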
mem-rr-addr-mode.ll 11 %tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
12 %tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
ppcf128-1.ll 19 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
20 store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
41 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
42 store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
63 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
64 store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
85 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
86 store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
rlwimi-keep-rsh.ll 17 %tmp4 = ashr i32 %a, 4
18 %tmp5 = or i32 %tmp3, %tmp4
rlwinm2.ll 16 %tmp4 = or i32 %tmp1, %tmp3 ; <i32> [#uses=1]
17 %tmp6 = and i32 %tmp4, 127 ; <i32> [#uses=1]
vec_splat.ll 15 %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
16 %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
26 %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
27 %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
65 %tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 > to <16 x i8>)
67 %tmp4.u = bitcast <16 x i8> %tmp4 to <16 x i8> ; <<16 x i8>> [#uses=1]
68 store <16 x i8> %tmp4.u, <16 x i8>* %A
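vec_splat.ll builds a splat one lane at a time: each insertelement writes %X into the next index, and the %tmp4/%tmp6 pairs above are lanes 2 and 3 of that chain. A minimal four-lane version, assuming %f4 names <4 x float> as the test's type alias suggests:

    define <4 x float> @splat_four(float %X) {
      %v0 = insertelement <4 x float> undef, float %X, i32 0
      %v1 = insertelement <4 x float> %v0, float %X, i32 1
      %v2 = insertelement <4 x float> %v1, float %X, i32 2   ; corresponds to %tmp4
      %v3 = insertelement <4 x float> %v2, float %X, i32 3   ; corresponds to %tmp6
      ret <4 x float> %v3                                    ; every lane now holds %X
    }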
  /external/swiftshader/third_party/LLVM/test/CodeGen/Thumb2/
thumb2-ldr_pre.ll 17 %tmp4 = sub i32 %tmp1, %b ; <i32> [#uses=1]
18 %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1]
thumb2-uxtb.ll 72 %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
73 %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
87 %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
88 %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
115 %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
116 %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
137 %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
138 %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
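Most of the thumb2-uxtb.ll matches pair a shift with the mask 16711680 (0xFF0000), moving one source byte into bits 16-23 and discarding the rest; the test checks that Thumb-2 codegen folds such shift-and-mask pairs into its byte-extract instructions instead of emitting the shift and the AND separately. The idiom in isolation, with an illustrative function name:

    define i32 @byte0_to_bits16_23(i32 %x) {
      %shifted = shl i32 %x, 16             ; bits 0-7 move up to bits 16-23
      %masked = and i32 %shifted, 16711680  ; 0xFF0000 keeps only that byte
      ret i32 %masked
    }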
  /external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2006-05-08-CoalesceSubRegClass.ll 15 %tmp4 = shl i32 %tmp2, %shift.upgrd.2 ; <i32> [#uses=1]
16 store i32 %tmp4, i32* @B
2007-02-04-OrAddrMode.ll 12 %tmp4 = bitcast float* %tmp3 to i8* ; <i8*> [#uses=1]
13 %ctg2 = getelementptr i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
2007-03-16-InlineAsm.ll 19 %tmp4 = load i32* %tmp ; <i32> [#uses=1]
20 store i32 %tmp4, i32* %retval
2007-05-15-maskmovq.ll 8 %tmp4 = bitcast <1 x i64> %mask1 to x86_mmx ; <x86_mmx> [#uses=1]
10 tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
2007-07-03-GR64ToVR64.ll 11 %tmp4 = bitcast <1 x i64> %B to x86_mmx ; <<4 x i16>> [#uses=1]
13 %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp6, x86_mmx %tmp4 ) ; <x86_mmx> [#uses=1]
2008-02-06-LoadFoldingBug.ll 6 %tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1]
7 call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind
2009-11-16-UnfoldMemOpBug.ll 21 %tmp4 = icmp eq i32 %tmp3, %count
22 br i1 %tmp4, label %bb2, label %bb1
2011-06-03-x87chain.ll 26 %tmp4 = load i64* %arrayidx, align 8
28 %conv = sitofp i64 %tmp4 to float
aliases.ll 31 %tmp4 = call %FunTy* @bar_f()
32 %tmp5 = add i32 %tmp3, %tmp4
and-or-fold.ll 10 %tmp4 = shl i32 %x, 16
11 %tmp5 = and i32 %tmp4, 16711680
codegen-prepare-cast.ll 17 %tmp4 = getelementptr i8* %tmp, i32 undef ; <i8*> [#uses=1]
18 %tmp5 = load i8* %tmp4 ; <i8> [#uses=0]
div8.ll 20 %tmp4 = load i8* %quotient, align 1
21 ret i8 %tmp4
