/external/llvm/test/CodeGen/Thumb2/ |
v8_IT_2.ll |
  19  %tmp6 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
  23  %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
|
/external/llvm/test/CodeGen/X86/ |
2010-04-08-CoalescerBug.ll |
  21  %tmp6 = bitcast i32* %tmp4 to i8*
  22  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false)
|
lea.ll |
  24  %tmp6 = lshr i32 %tmp, 2
  25  %tmp7 = mul i32 %tmp6, -4
|
lsr-wrap.ll |
  29  %tmp6 = add i8 %tmp5, -112 ; <i8> [#uses=1]
  30  %tmp7 = tail call i32 @func_3(i8 signext %tmp6) nounwind ; <i32> [#uses=0]
|
packed_struct.ll |
  22  %tmp6 = load i32, i32* getelementptr (%struct.anon, %struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
  24  %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
|
subreg-to-reg-2.ll |
  17  %tmp6 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
  21  %tmp10 = zext i32 %tmp6 to i64 ; <i64> [#uses=1]
|
vec_set-3.ll |
  11  %tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 3
  12  ret <4 x float> %tmp6
|
/external/llvm/test/Feature/ |
aliases.ll |
  40  %tmp6 = add i32 %tmp1, %tmp5
  41  %tmp7 = add i32 %tmp6, %tmp0
|
packed_struct.ll |
  21  %tmp6 = load i32, i32* getelementptr (%struct.anon, %struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
  23  %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
|
/external/llvm/test/Integer/ |
packed_struct_bt.ll |
  21  %tmp6 = load i35, i35* getelementptr (%struct.anon, %struct.anon* @foos, i32 0, i32 3) ; <i35> [#uses=1]
  23  %tmp7 = add i35 %tmp4, %tmp6 ; <i35> [#uses=1]
|
/external/llvm/test/Transforms/IndVarSimplify/ |
loop_evaluate11.ll |
  27  %tmp6 = lshr i32 undef, %tmp5 ; <i32> [#uses=1]
  28  %tmp7 = icmp eq i32 %tmp6, 0 ; <i1> [#uses=1]
|
/external/llvm/test/Transforms/InstCombine/ |
2007-03-25-BadShiftMask.ll |
  15  %tmp6 = load i32, i32* %tmp5 ; <i32> [#uses=1]
  16  %tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
|
2010-11-01-lshr-mask.ll |
  34  %tmp6 = and i8 %tmp5, 84
  35  %tmp7 = or i8 %tmp4, %tmp6
|
urem-simplify-bug.ll |
  11  %tmp6 = icmp ne i32 %tmp5, 0 ; <i1> [#uses=1]
  12  %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
|
/external/llvm/test/Transforms/LoopVectorize/X86/ |
reduction-crash.ll |
  23  %tmp6 = getelementptr inbounds [16 x double], [16 x double]* undef, i32 0, i32 %tmp5
  24  %tmp7 = load double, double* %tmp6, align 4
|
/external/llvm/test/Transforms/ObjCARC/ |
contract-storestrong-ivar.ll |
  28  %tmp6 = bitcast i8* %tmp4 to %1*
  29  store %1* %tmp6, %1** %tmp1, align 8
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
pr23510.ll |
  33  %tmp6 = load i64, i64* %arrayidx3, align 8
  35  %add9 = add i64 %tmp7, %tmp6
|
/external/llvm/test/Transforms/ScalarRepl/ |
2007-11-03-bigendian_apint.ll |
  25  %tmp6 = load i1, i1* %tmp5, align 1 ; <i1> [#uses=1]
  26  %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
|
/external/llvm/test/Transforms/SimplifyCFG/ |
2007-12-21-Crash.ll |
  16  %tmp6 = tail call i32 (...) @foo( ) nounwind ; <i32> [#uses=1]
  17  switch i32 %tmp6, label %bb13 [
|
/external/llvm/test/Transforms/StraightLineStrengthReduce/AMDGPU/ |
reassociate-geps-and-slsr-addrspace.ll |
  23  %tmp6 = bitcast float addrspace(1)* %p2 to i32 addrspace(1)*
  24  %v22 = load i32, i32 addrspace(1)* %tmp6, align 4
  50  %tmp6 = bitcast float addrspace(1)* %p2 to i32 addrspace(1)*
  51  %v22 = load i32, i32 addrspace(1)* %tmp6, align 4
  76  %tmp6 = bitcast float addrspace(3)* %p2 to i32 addrspace(3)*
  77  %v22 = load i32, i32 addrspace(3)* %tmp6, align 4
  101  %tmp6 = bitcast float addrspace(3)* %p2 to i32 addrspace(3)*
  102  %v22 = load i32, i32 addrspace(3)* %tmp6, align 4
|
/external/llvm/test/Transforms/StructurizeCFG/ |
one-loop-multiple-backedges.ll |
  20  %tmp6 = fcmp olt float 0.000000e+00, %arg2
  22  br i1 %tmp6, label %bb10, label %bb7
|
/external/llvm/test/CodeGen/AArch64/ |
aarch64-smull.ll |
  77  %tmp6 = mul <8 x i16> %tmp4, %tmp5
  78  %tmp7 = add <8 x i16> %tmp1, %tmp6
  90  %tmp6 = mul <4 x i32> %tmp4, %tmp5
  91  %tmp7 = add <4 x i32> %tmp1, %tmp6
  103  %tmp6 = mul <2 x i64> %tmp4, %tmp5
  104  %tmp7 = add <2 x i64> %tmp1, %tmp6
  116  %tmp6 = mul <8 x i16> %tmp4, %tmp5
  117  %tmp7 = add <8 x i16> %tmp1, %tmp6
  129  %tmp6 = mul <4 x i32> %tmp4, %tmp5
  130  %tmp7 = add <4 x i32> %tmp1, %tmp6
  [all...]
|
/external/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/ |
different-addrspace-addressing-mode-loops.ll |
  34  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  35  %tmp7 = add nsw i32 %tmp6, %tmp4
  72  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  73  %tmp7 = add nsw i32 %tmp6, %tmp4
  107  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
  108  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  110  store i32 %tmp8, i32 addrspace(1)* %tmp6, align 4
  146  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
  147  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  149  store i32 %tmp8, i32 addrspace(1)* %tmp6, align [all...]
|
/external/libjpeg-turbo/simd/ |
jfdctint-mmx.asm |
  171  psubw mm7,mm2 ; mm7=data1-data6=tmp6
  178  movq MMWORD [wk(0)], mm7 ; wk(0)=tmp6
  251  movq mm5, MMWORD [wk(0)] ; mm5=tmp6
  283  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  285  ; tmp6 = tmp6 * 3.072711026; tmp7 = tmp7 * 1.501321110;
  288  ; data3 = tmp6 + z2 + z3; data1 = tmp7 + z1 + z4;
  292  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  293  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447)
  [all...]
|
jfdctint-sse2-64.asm |
  181  psubw xmm1,xmm2 ; xmm1=data1-data6=tmp6
  188  movdqa XMMWORD [wk(0)], xmm1 ; wk(0)=tmp6
  261  movdqa xmm3, XMMWORD [wk(0)] ; xmm3=tmp6
  293  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  295  ; tmp6 = tmp6 * 3.072711026; tmp7 = tmp7 * 1.501321110;
  298  ; data3 = tmp6 + z2 + z3; data1 = tmp7 + z1 + z4;
  302  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  303  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447)
  [all...]
|
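The comment fragments matched in the two jfdctint files above spell out the odd part of libjpeg-turbo's slow-integer forward DCT. As orientation only, here is a floating-point C sketch of that formula, modelled on the scalar reference implementation (jfdctint.c); the function and variable names are illustrative, and this is not the fixed-point SIMD code from the files listed above.

    /* Floating-point sketch of the odd part of the LLM forward DCT,
     * following the layout of libjpeg-turbo's scalar jfdctint.c.
     * Inputs are the odd differences of one row/column:
     *   tmp4 = d3-d4, tmp5 = d2-d5, tmp6 = d1-d6, tmp7 = d0-d7. */
    static void fdct_odd_part(double tmp4, double tmp5, double tmp6, double tmp7,
                              double *data1, double *data3,
                              double *data5, double *data7)
    {
      double z1 = tmp4 + tmp7;              /* "z1 = tmp4 + tmp7; z2 = tmp5 + tmp6" */
      double z2 = tmp5 + tmp6;
      double z3 = tmp4 + tmp6;
      double z4 = tmp5 + tmp7;
      double z5 = (z3 + z4) * 1.175875602;  /* sqrt(2) * c3 */

      tmp4 *= 0.298631336;
      tmp5 *= 2.053119869;
      tmp6 *= 3.072711026;                  /* "tmp6 = tmp6 * 3.072711026" */
      tmp7 *= 1.501321110;                  /* "tmp7 = tmp7 * 1.501321110" */
      z1 *= -0.899976223;
      z2 *= -2.562915447;
      z3 = z3 * -1.961570560 + z5;
      z4 = z4 * -0.390180644 + z5;

      *data7 = tmp4 + z1 + z3;
      *data5 = tmp5 + z2 + z4;
      *data3 = tmp6 + z2 + z3;              /* "data3 = tmp6 + z2 + z3" */
      *data1 = tmp7 + z1 + z4;              /* "data1 = tmp7 + z1 + z4" */
    }

The fused expressions quoted from the asm comments (e.g. tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447) are the same math with the z2 multiply distributed over tmp5 and tmp6, so the SIMD code can evaluate them as paired multiply-adds instead of computing z2's product separately.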