/external/llvm/test/CodeGen/ARM/
vldlane.ll
    106:  %tmp5 = add <8 x i8> %tmp3, %tmp4
    107:  ret <8 x i8> %tmp5
    119:  %tmp5 = add <4 x i16> %tmp3, %tmp4
    120:  ret <4 x i16> %tmp5
    131:  %tmp5 = add <2 x i32> %tmp3, %tmp4
    132:  ret <2 x i32> %tmp5
    145:  %tmp5 = add <2 x i32> %tmp3, %tmp4
    148:  ret <2 x i32> %tmp5
    159:  %tmp5 = fadd <2 x float> %tmp3, %tmp4
    160:  ret <2 x float> %tmp5
    [all...]
vlddup.ll
    74:   %tmp5 = add <8 x i8> %tmp2, %tmp4
    75:   ret <8 x i8> %tmp5
    88:   %tmp5 = add <4 x i16> %tmp2, %tmp4
    89:   ret <4 x i16> %tmp5
    103:  %tmp5 = add <4 x i16> %tmp2, %tmp4
    106:  ret <4 x i16> %tmp5
    118:  %tmp5 = add <2 x i32> %tmp2, %tmp4
    119:  ret <2 x i32> %tmp5
    139:  %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 2
    140:  %tmp6 = shufflevector <8 x i8> %tmp5, <8 x i8> undef, <8 x i32> zeroinitializer
    [all...]
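The vldlane.ll and vlddup.ll hits above all follow the same test shape: a NEON lane or dup load produces several vectors, which are then combined with an add so the result is observable. A minimal C-level analogue using the arm_neon.h intrinsics; the function name, element type, and lane index here are illustrative assumptions, not taken from the tests:

    #include <arm_neon.h>

    /* Load one lane of two 8x8 vectors from p, then add the two halves --
     * the same add/ret pair seen in the vldlane.ll hits above.
     * Lane index 1 is an arbitrary example. */
    int8x8_t load_lane_and_add(const int8_t *p, int8x8x2_t v)
    {
        int8x8x2_t r = vld2_lane_s8(p, v, 1);
        return vadd_s8(r.val[0], r.val[1]);  /* cf. %tmp5 = add <8 x i8> %tmp3, %tmp4 */
    }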
smul.ll
    33:   %tmp5 = add i32 %tmp3, %a ; <i32> [#uses=1]
    34:   ret i32 %tmp5
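The matched lines in smul.ll are the tail of a multiply-accumulate pattern: a mul result (%tmp3) added to an incoming argument. A hedged C sketch of the idiom; the function name and operand types are assumptions chosen to match that shape:

    /* Multiply two halfword values and accumulate into a: the add/ret
     * pair corresponds to "%tmp5 = add i32 %tmp3, %a; ret i32 %tmp5". */
    int mla16(int a, short x, short y)
    {
        int prod = (int)x * (int)y;  /* candidate for ARM smulbb */
        return prod + a;             /* foldable into smlabb */
    }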
/external/jpeg/
jfdctflt.c
    61:   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    76:   tmp5 = dataptr[2] - dataptr[5];
    96:   tmp10 = tmp4 + tmp5; /* phase 2 */
    97:   tmp11 = tmp5 + tmp6;
    126:  tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
    146:  tmp10 = tmp4 + tmp5; /* phase 2 */
    147:  tmp11 = tmp5 + tmp6;
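For context, tmp5 in jfdctflt.c is one of the row (and later column) input differences feeding the AAN floating-point forward DCT, and the "phase 2" lines matched above are the sums it flows into. A trimmed sketch of just that dataflow, not the complete IJG routine:

    /* Trimmed from the 8-point FDCT: tmp4..tmp6 are input differences,
     * tmp10/tmp11 the "phase 2" sums the search hits land on. */
    void fdct_row_sketch(const float *dataptr, float *tmp10_out, float *tmp11_out)
    {
        float tmp4 = dataptr[3] - dataptr[4];
        float tmp5 = dataptr[2] - dataptr[5];  /* line 76 above */
        float tmp6 = dataptr[1] - dataptr[6];
        *tmp10_out = tmp4 + tmp5;              /* phase 2 */
        *tmp11_out = tmp5 + tmp6;
    }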
jidctflt.c
    72:   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    142:  tmp5 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    146:  z13 = tmp6 + tmp5; /* phase 6 */
    147:  z10 = tmp6 - tmp5;
    159:  tmp5 = tmp11 - tmp6;
    160:  tmp4 = tmp10 + tmp5;
    166:  wsptr[DCTSIZE*2] = tmp2 + tmp5;
    167:  wsptr[DCTSIZE*5] = tmp2 - tmp5;
    216:  tmp5 = tmp11 - tmp6;
    217:  tmp4 = tmp10 + tmp5;
    [all...]
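The wsptr stores matched above are the inverse transform's output butterfly: each odd-part term such as tmp5 is added to one output row and subtracted from its mirror. A minimal sketch of just that step, assuming the usual IJG value DCTSIZE = 8:

    #define DCTSIZE 8  /* as in the IJG headers */

    /* Output butterfly for one column: tmp5 feeds rows 2 and 5
     * symmetrically (lines 166/167 of jidctflt.c above). */
    static void idct_out_pair(float *wsptr, float tmp2, float tmp5)
    {
        wsptr[DCTSIZE * 2] = tmp2 + tmp5;
        wsptr[DCTSIZE * 5] = tmp2 - tmp5;
    }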
/external/qemu/distrib/jpeg-6b/
jfdctflt.c
    61:   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    76:   tmp5 = dataptr[2] - dataptr[5];
    96:   tmp10 = tmp4 + tmp5; /* phase 2 */
    97:   tmp11 = tmp5 + tmp6;
    126:  tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
    146:  tmp10 = tmp4 + tmp5; /* phase 2 */
    147:  tmp11 = tmp5 + tmp6;
jidctflt.c
    72:   FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    142:  tmp5 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    146:  z13 = tmp6 + tmp5; /* phase 6 */
    147:  z10 = tmp6 - tmp5;
    159:  tmp5 = tmp11 - tmp6;
    160:  tmp4 = tmp10 + tmp5;
    166:  wsptr[DCTSIZE*2] = tmp2 + tmp5;
    167:  wsptr[DCTSIZE*5] = tmp2 - tmp5;
    216:  tmp5 = tmp11 - tmp6;
    217:  tmp4 = tmp10 + tmp5;
    [all...]
/external/llvm/test/Analysis/ScalarEvolution/
2008-06-12-BinomialInt64.ll
    11:   %d.1.01 = phi i64 [ %tmp5.i, %bb10 ], [ 0, %entry ] ; <i64> [#uses=1]
    12:   %tmp5.i = add i64 %d.1.01, 1 ; <i64> [#uses=2]
    13:   %tmp14 = add i64 %accum.03, %tmp5.i ; <i64> [#uses=2]
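The three matched lines form a simple recurrence: d starts at 0 and is bumped by 1 each iteration, and accum adds the bumped value, so accum grows as a second-degree (binomial) expression in the trip count. A hedged C reconstruction; the loop bound and names are illustrative, only the phi/add pair comes from the test:

    /* accum accumulates 1 + 2 + ... + n, i.e. the binomial n*(n+1)/2. */
    long long triangular(int n)
    {
        long long accum = 0;
        long long d = 0;
        for (int i = 0; i < n; ++i) {
            d = d + 1;       /* %tmp5.i = add i64 %d.1.01, 1 */
            accum += d;      /* %tmp14 = add i64 %accum.03, %tmp5.i */
        }
        return accum;
    }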
/external/llvm/test/CodeGen/PowerPC/
2007-09-08-unaligned.ll
    25:   %tmp5 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
    26:   store double %tmp4, double* %tmp5, align 1
    43:   %tmp5 = load double* %tmp4, align 1 ; <double> [#uses=1]
    45:   %tmp7 = call i32 (i8*, ...)* @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
and-branch.ll
    10:   %tmp5 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
2007-04-30-InlineAsmEarlyClobber.ll
    25:   %tmp5 = load i32* %Y ; <i32> [#uses=1]
    26:   %tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
return-val-i128.ll
    17:   %tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
    18:   %tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
rlwimi3.ll
    8:    %tmp5 = or i32 %tmp2, %tmp4 ; <i32> [#uses=1]
    13:   %tmp14 = mul i32 %tmp5, %alpha ; <i32> [#uses=1]
/external/llvm/test/CodeGen/X86/
2009-01-13-DoubleUpdate.ll
    11:   %tmp5.i3.i.i = shufflevector <2 x double> %0, <2 x double> undef, <4 x i32> < i32 0, i32 1, i32 undef, i32 undef > ; <<4 x double>> [#uses=1]
    12:   %tmp6.i4.i.i = shufflevector <4 x double> zeroinitializer, <4 x double> %tmp5.i3.i.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x double>> [#uses=1]
    16:   %tmp5.i = shufflevector <8 x double> %tmp14.i.i, <8 x double> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef > ; <<16 x double>> [#uses=1]
    17:   %tmp6.i = shufflevector <16 x double> %x, <16 x double> %tmp5.i, <16 x i32> < i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 > ; <<16 x double>> [#uses=1]
or-branch.ll
    12:   %tmp5 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
2006-05-08-InstrSched.ll
    14:   %tmp5 = load i32* @C ; <i32> [#uses=1]
    17:   %tmp7 = shl i32 %tmp5, %shift.upgrd.3 ; <i32> [#uses=1]
2010-04-08-CoalescerBug.ll
    20:   %tmp5 = bitcast [32 x i32]* %BitValueArray to i8*
    22:   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false)
avoid-loop-align.ll
    34:   %tmp5 = getelementptr [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
    35:   store i32 4, i32* %tmp5, align 4
lsr-wrap.ll
    28:   %tmp5 = shl i8 %tmp27, 2 ; <i8> [#uses=1]
    29:   %tmp6 = add i8 %tmp5, -112 ; <i8> [#uses=1]
packed_struct.ll
    32:   %tmp5 = add i8 %tmp4, %tmp ; <i8> [#uses=1]
    33:   ret i8 %tmp5
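packed_struct.ll checks x86 code generation for field accesses into a packed struct; the matched add/ret is the test summing two loaded fields. A C-level analogue with an assumed, hypothetical field layout (the real test's struct differs):

    #include <stdint.h>

    /* Hypothetical packed layout; the point is only that the unaligned
     * fields still load correctly and their i8 sum is returned. */
    struct __attribute__((packed)) pair {
        int8_t  a;
        int32_t pad;
        int8_t  b;
    };

    int8_t sum_fields(const struct pair *p)
    {
        return (int8_t)(p->a + p->b);  /* cf. "%tmp5 = add i8 %tmp4, %tmp" */
    }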
shl-anyext.ll
    33:   %tmp5 = and i64 %tmp3, %tmp4 ; <i64> [#uses=1]
    34:   %tmp6 = shl i64 %tmp5, 3 ; <i64> [#uses=1]
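The matched pair is a mask followed by a scale-by-8. In C terms (the operand sources are assumed; only the and/shl pair comes from the test):

    #include <stdint.h>

    uint64_t mask_then_scale(uint64_t a, uint64_t b)
    {
        uint64_t t = a & b;  /* %tmp5 = and i64 %tmp3, %tmp4 */
        return t << 3;       /* %tmp6 = shl i64 %tmp5, 3 */
    }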
stride-nine-with-base-reg.ll
    23:   %tmp5 = getelementptr [1000 x i8]* @A, i32 0, i32 %i.019.0
    24:   store i8 %tmp4, i8* %tmp5, align 4
/external/llvm/test/Transforms/GlobalOpt/
invoke.ll
    24:   %tmp5 = landingpad { i8*, i32 } personality i8* undef
/external/llvm/test/Assembler/
2007-12-11-AddressSpaces.ll
    23:   %tmp5 = load i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; <i32 addrspace(11)*> [#uses=1]
    24:   ret i32 addrspace(11)* %tmp5
/external/llvm/test/CodeGen/Thumb2/
lsr-deficiency.ll
    24:   %tmp5 = sub i32 1000, %indvar ; <i32> [#uses=1]
    26:   %scevgep = getelementptr i32* %1, i32 %tmp5 ; <i32*> [#uses=1]
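The matched lines show the address computation that loop strength reduction should simplify: the induction variable counts up while the element index (1000 - indvar) counts down. A hedged C sketch of that access pattern; the loop body and types are illustrative, only the subtraction-based indexing comes from the test:

    /* Walks p from index 1000 downward via an up-counting i; LSR can
     * rewrite the "1000 - i" indexing into a decrementing pointer. */
    int sum_reversed(const int *p /* at least 1001 elements */)
    {
        int sum = 0;
        for (int i = 0; i < 1000; ++i)
            sum += p[1000 - i];  /* %tmp5 = sub i32 1000, %indvar */
        return sum;
    }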