/external/jpeg/
jidctflt.c
     72  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  local
    143  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    146  z13 = tmp6 + tmp5;              /* phase 6 */
    147  z10 = tmp6 - tmp5;
    158  tmp6 = tmp12 - tmp7;            /* phase 2 */
    159  tmp5 = tmp11 - tmp6;
    164  wsptr[DCTSIZE*1] = tmp1 + tmp6;
    165  wsptr[DCTSIZE*6] = tmp1 - tmp6;
    215  tmp6 = tmp12 - tmp7;
    216  tmp5 = tmp11 - tmp6;
    [all...]
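The jidctflt.c hits come from libjpeg's floating-point inverse DCT. As a rough guide to the pattern behind the matched lines (a minimal sketch, not the verbatim libjpeg source, and output_pair is a hypothetical helper): dequantizing a coefficient in the float path is a single multiply, and each even/odd pair such as tmp1/tmp6 produces two mirrored rows of the 8x8 workspace.

    /* Sketch only, not the verbatim libjpeg source. */
    #define DCTSIZE 8
    typedef float FAST_FLOAT;

    /* In the float IDCT, dequantizing a coefficient is a single multiply. */
    #define DEQUANTIZE(coef, quantval) ((FAST_FLOAT)(coef) * (quantval))

    /* Hypothetical helper: one even/odd butterfly writing two mirrored rows. */
    static void output_pair(FAST_FLOAT even, FAST_FLOAT odd, FAST_FLOAT *wsptr)
    {
        wsptr[DCTSIZE * 1] = even + odd;   /* cf. line 164 above */
        wsptr[DCTSIZE * 6] = even - odd;   /* cf. line 165 above */
    }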
/external/llvm/test/Analysis/ScalarEvolution/ |
min-max-exprs.ll
     32  %tmp6 = sext i32 %N to i64
     33  %tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
     35  ; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6
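The icmp-feeding-select shape matched here is how a C min/max reaches LLVM IR; the file name suggests the test checks how ScalarEvolution models such compare-plus-select patterns. A scalar sketch (hypothetical helper, not code from the test):

    /* A compare plus ternary lowers to the icmp/select pair seen above. */
    static long long smin64(long long a, long long b)
    {
        return (a < b) ? a : b;   /* icmp slt, then select i1 */
    }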
/external/llvm/test/CodeGen/X86/ |
2011-06-12-FastAllocSpill.ll
     25  %tmp6 = alloca void ()*, align 8
     34  store void ()* %tmp16, void ()** %tmp6, align 8
     35  %tmp17 = load void ()*, void ()** %tmp6, align 8
2007-11-04-LiveVariablesBug.ll
     12  %tmp6 = call i64* asm sideeffect "foo",
2009-01-13-DoubleUpdate.ll
     12  %tmp6.i4.i.i = shufflevector <4 x double> zeroinitializer, <4 x double> %tmp5.i3.i.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x double>> [#uses=1]
     13  %tmp14.i8.i.i = shufflevector <4 x double> %tmp6.i4.i.i, <4 x double> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x double>> [#uses=1]
     17  %tmp6.i = shufflevector <16 x double> %x, <16 x double> %tmp5.i, <16 x i32> < i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 > ; <<16 x double>> [#uses=1]
     18  %tmp14.i = shufflevector <16 x double> %tmp6.i, <16 x double> zeroinitializer, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23 > ; <<16 x double>> [#uses=1]
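For readers skimming these matches, the shufflevector mask convention is: indices 0..N-1 pick lanes from the first operand, N..2N-1 from the second. A small scalar emulation of the <4 x double> case (hypothetical helper):

    #include <stddef.h>

    /* Emulates shufflevector on two 4-lane double vectors: mask index i < 4
     * picks a[i], otherwise b[i - 4].  With operands (zero, v) and mask
     * <4,5,2,3> as in line 12 above, the result is { v[0], v[1], 0, 0 }. */
    static void shuffle4(const double a[4], const double b[4],
                         const int mask[4], double out[4])
    {
        for (size_t i = 0; i < 4; i++)
            out[i] = (mask[i] < 4) ? a[mask[i]] : b[mask[i] - 4];
    }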
2010-02-23-RematImplicitSubreg.ll
     19  %tmp6 = load i8, i8* undef, align 2 ; <i8> [#uses=3]
     20  %conv11 = sext i8 %tmp6 to i64 ; <i64> [#uses=1]
     25  %conv18 = sext i8 %tmp6 to i32 ; <i32> [#uses=1]
     30  %index.0 = phi i8 [ 0, %if.then ], [ %tmp6, %for.body ] ; <i8> [#uses=1]
vec_ins_extract.ll
     13  %tmp6 = fadd <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
     14  store <4 x float> %tmp6, <4 x float>* %F
     27  %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
     28  store <4 x float> %tmp6, <4 x float>* %F
2010-04-08-CoalescerBug.ll
     21  %tmp6 = bitcast i32* %tmp4 to i8*
     22  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false)
lea.ll
     24  %tmp6 = lshr i32 %tmp, 2
     25  %tmp7 = mul i32 %tmp6, -4
lsr-wrap.ll
     29  %tmp6 = add i8 %tmp5, -112 ; <i8> [#uses=1]
     30  %tmp7 = tail call i32 @func_3(i8 signext %tmp6) nounwind ; <i32> [#uses=0]
packed_struct.ll
     22  %tmp6 = load i32, i32* getelementptr (%struct.anon, %struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
     24  %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
subreg-to-reg-2.ll
     17  %tmp6 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
     21  %tmp10 = zext i32 %tmp6 to i64 ; <i64> [#uses=1]
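The trunc-then-zext pair matched above just clears the upper 32 bits of a 64-bit value; judging by the file name, the test expects x86-64 to get this from an ordinary 32-bit move, which implicitly zeroes the high half, rather than an explicit mask. Scalar sketch (hypothetical helper):

    #include <stdint.h>

    /* trunc i64 -> i32 followed by zext i32 -> i64 is x & 0xffffffff. */
    static uint64_t low32(uint64_t x)
    {
        return (uint32_t)x;   /* the narrowing and widening do the masking */
    }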
vec_set-3.ll
     11  %tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 3
     12  ret <4 x float> %tmp6
xor.ll
     53  %tmp6 = and i32 %tmp4not, %b_addr.0
     54  %tmp8 = shl i32 %tmp6, 1
     76  %tmp6 = and i16 %tmp4not, %b_addr.0
     77  %tmp8 = shl i16 %tmp6, 1
     98  %tmp6 = and i8 %tmp4not, %b_addr.0
     99  %tmp8 = shl i8 %tmp6, 1
    120  %tmp6 = and i32 %tmp4not, %b_addr.0
    121  %tmp8 = shl i32 %tmp6, 1
/external/llvm/test/CodeGen/ARM/ |
vbsl.ll
     13  %tmp6 = and <8 x i8> %tmp5, %tmp3
     14  %tmp7 = or <8 x i8> %tmp4, %tmp6
     26  %tmp6 = and <4 x i16> %tmp5, %tmp3
     27  %tmp7 = or <4 x i16> %tmp4, %tmp6
     39  %tmp6 = and <2 x i32> %tmp5, %tmp3
     40  %tmp7 = or <2 x i32> %tmp4, %tmp6
     52  %tmp6 = and <1 x i64> %tmp5, %tmp3
     53  %tmp7 = or <1 x i64> %tmp4, %tmp6
     65  %tmp6 = and <16 x i8> %tmp5, %tmp3
     66  %tmp7 = or <16 x i8> %tmp4, %tmp6
    [all...]
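The and/or pairs matched throughout vbsl.ll build the per-bit select idiom, (a & mask) | (b & ~mask), which the ARM backend is expected to fold into a single VBSL instruction. A scalar sketch of the idiom (hypothetical helper):

    #include <stdint.h>

    /* Per-bit select: where mask has a 1, take the bit from a, else from b. */
    static inline uint32_t bit_select(uint32_t mask, uint32_t a, uint32_t b)
    {
        return (a & mask) | (b & ~mask);
    }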
2012-04-10-DAGCombine.ll
     11  %tmp6 = fadd float %tmp, -1.500000e+01
     12  %tmp7 = fdiv float %tmp6, 2.000000e+01
ifcvt7.ll
     14  %tmp6 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
     18  %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
zextload_demandedbits.ll
     25  %tmp6 = shl i32 %tmp5, 20
     26  %tmp7 = ashr exact i32 %tmp6, 20
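The shl 20 / ashr 20 pair matched above is the usual idiom for sign-extending a narrow field, here 12 bits, held in a 32-bit value; the exact flag asserts that the bits shifted back out are zero. Scalar sketch (hypothetical helper, assuming arithmetic right shift of signed values):

    #include <stdint.h>

    /* Shift left then arithmetic-shift right by (32 - 12) sign-extends the
     * low 12 bits of x.  Relies on the compiler using an arithmetic right
     * shift for signed values, which mainstream compilers do. */
    static int32_t sign_extend_12(uint32_t x)
    {
        return (int32_t)(x << 20) >> 20;
    }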
/external/llvm/test/Transforms/GlobalOpt/ |
2008-01-03-Crash.ll
     23  %tmp6.i4.i = load i32, i32* bitcast (void (i32)** @indirect1 to i32*), align 4 ; <i32> [#uses=0]
/external/llvm/test/Analysis/Delinearization/ |
undef.ll
     21  %tmp6 = mul i64 %tmp5, undef
     22  %arrayidx69.sum = add i64 undef, %tmp6
/external/llvm/test/CodeGen/AArch64/ |
arm64-vext.ll
     68  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
     70  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
     89  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
     91  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
    110  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
    112  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
    131  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
    133  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
    152  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
    154  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2 [all...]
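Each arm64-vext.ll match bitcasts an <8 x i8> into wider lanes and then takes a sliding window out of the concatenation of two vectors (shufflevector masks <1,2,3,4>, <2,3,4,5>, and so on), which is what the AArch64 EXT instruction computes. A scalar emulation of the 4 x i16 case (hypothetical helper):

    #include <stdint.h>

    /* Concatenate a and b, then return the 4-lane window starting at lane
     * shift (0..4), the pattern the shufflevector masks above encode. */
    static void vext_u16x4(const uint16_t a[4], const uint16_t b[4],
                           unsigned shift, uint16_t out[4])
    {
        uint16_t concat[8];
        for (unsigned i = 0; i < 4; i++) {
            concat[i]     = a[i];
            concat[i + 4] = b[i];
        }
        for (unsigned i = 0; i < 4; i++)
            out[i] = concat[i + shift];
    }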
/external/llvm/test/CodeGen/PowerPC/ |
return-val-i128.ll
     18  %tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
     19  %tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]
vcmp-fold.ll
     12  %tmp6 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
     13  %tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]
/external/llvm/test/CodeGen/Thumb2/ |
v8_IT_2.ll
     19  %tmp6 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
     23  %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
/external/llvm/test/Feature/ |
aliases.ll
     40  %tmp6 = add i32 %tmp1, %tmp5
     41  %tmp7 = add i32 %tmp6, %tmp0