/external/llvm/test/CodeGen/NVPTX/

vector-select.ll
     9: %tmp4 = load <2 x i32> addrspace(1)* %def_a
    12: %0 = icmp sge <2 x i32> %tmp4, zeroinitializer

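The icmp on line 12 produces a <2 x i1> mask that this test feeds into a vector select (hence the file name). A minimal sketch of that pattern, with illustrative operand names rather than the test's:

    define <2 x i32> @select_sge(<2 x i32> %a, <2 x i32> %b) {
      %mask = icmp sge <2 x i32> %a, zeroinitializer   ; per-element signed >= 0
      %sel = select <2 x i1> %mask, <2 x i32> %a, <2 x i32> %b
      ret <2 x i32> %sel
    }
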
/external/llvm/test/CodeGen/PowerPC/

2007-05-30-dagcombine-miscomp.ll
    10: %tmp4 = shl i8 %tmp23, 1 ; <i8> [#uses=1]
    11: %tmp5 = and i8 %tmp4, 2 ; <i8> [#uses=1]

and-branch.ll
     7: %tmp4 = and i1 %tmp3, %tmp ; <i1> [#uses=1]
     8: br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock

2007-02-16-InlineAsmNConstraint.ll
     9: %tmp4 = call i32 asm "rlwimi $0,$2,$3,$4,$5", "=r,0,r,n,n,n"( i32 0, i32 0, i32 0, i32 24, i32 31 ) ; <i32> [#uses=0]

/external/llvm/test/CodeGen/X86/

2006-05-17-VectorArg.ll
     9: %tmp4 = fdiv float 1.000000e+00, %tmp3 ; <float> [#uses=1]
    10: %tmp11 = insertelement <4 x float> zeroinitializer, float %tmp4, i32 3 ; <<4 x float>> [#uses=1]

2007-04-24-Huge-Stack.ll
    13: %tmp4 = call i8* @md5_finish_ctx( %struct.md5_ctx* %ctx, i8* %resblock ) ; <i8*> [#uses=1]
    14: ret i8* %tmp4

2007-04-27-InlineAsm-IntMemInput.ll
     9: %tmp4 = tail call i32 asm "bsrl $1, $0", "=r,ro,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 10 ) ; <i32> [#uses=1]
    10: ret i32 %tmp4

2007-07-18-Vector-Extract.ll
    15: %tmp4 = load i64* %tmp2.gep ; <i64> [#uses=1]
    16: ret i64 %tmp4

inline-asm-x-scalar.ll
    20: %tmp4 = fsub float %tmp1, 0x3810000000000000 ; <float> [#uses=1]
    21: tail call void asm sideeffect "", "x,~{dirflag},~{fpsr},~{flags}"( float %tmp4 )

or-branch.ll
     8: %tmp4 = or i1 %tmp3, %tmp.upgrd.1 ; <i1> [#uses=1]
     9: br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock

tailcallbyval.ll
    17: %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval %a )
    18: ret i32 %tmp4

x86-64-gv-offset.ll
     9: %tmp4 = load double* getelementptr (%struct.x* @X, i32 0, i32 1), align 8 ; <double> [#uses=1]
    10: tail call void @t( float %tmp2, double %tmp4 ) nounwind

2010-08-04-MaskedSignedCompare.ll
    23: %tmp4.pre = load i32* @g_38 ; <i32> [#uses=1]
    31: %tmp4 = phi i32 [ %tmp4.pre, %entry.if.end_crit_edge ], [ 1, %if.then ] ; <i32> [#uses=1]
    32: %call5 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %tmp4) nounwind ; <i32> [#uses=0]

packed_struct.ll
    23: %tmp4 = add i32 %tmp3, %tmp ; <i32> [#uses=1]
    24: %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
    31: %tmp4 = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1) ; <i8> [#uses=1]
    32: %tmp5 = add i8 %tmp4, %tmp ; <i8> [#uses=1]

/external/llvm/test/Transforms/GVN/

2008-02-12-UndefLoad.ll
    13: %tmp4 = and i32 %tmp3, -21 ; <i32> [#uses=1]
    14: store i32 %tmp4, i32* %tmp1, align 4

/external/llvm/test/Transforms/InstCombine/

bitcast-bigendian.ll
    16: %tmp4 = bitcast i32 %tmp2 to float ; <float> [#uses=1]
    18: %add = fadd float %tmp24, %tmp4
    24: ; CHECK-NEXT: %tmp4 = extractelement <2 x float> {{.*}}, i32 1
    25: ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
    38: %tmp4 = bitcast i32 %tmp2 to float
    40: %add = fadd float %tmp24, %tmp4
    46: ; CHECK-NEXT: %tmp4 = extractelement <4 x float> {{.*}}, i32 1
    47: ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4

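The CHECK lines show the fold being tested: a trunc-plus-bitcast round trip through an integer becomes a direct extractelement, with big-endian lane numbering (the low 32 bits of the i64 hold element 1, not element 0). A hedged sketch of the input/output shape, using illustrative names rather than the test's:

    define float @fold_low_half(<2 x float> %v) {
      %i = bitcast <2 x float> %v to i64   ; reinterpret the vector's bits
      %lo = trunc i64 %i to i32            ; low 32 bits = element 1 on big-endian
      %f = bitcast i32 %lo to float
      ret float %f
    }
    ; after instcombine, roughly:
    ;   %f = extractelement <2 x float> %v, i32 1
    ;   ret float %f
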
/external/llvm/test/Transforms/Reassociate/

secondary.ll
    17: %tmp4 = add i32 %tmp3, 2014710503
    19: %tmp6 = sub i32 %tmp4, %tmp5

/external/llvm/test/CodeGen/ARM/

vld2.ll
    21: %tmp4 = add <8 x i8> %tmp2, %tmp3
    22: ret <8 x i8> %tmp4
    33: %tmp4 = add <4 x i16> %tmp2, %tmp3
    34: ret <4 x i16> %tmp4
    44: %tmp4 = add <2 x i32> %tmp2, %tmp3
    45: ret <2 x i32> %tmp4
    55: %tmp4 = fadd <2 x float> %tmp2, %tmp3
    56: ret <2 x float> %tmp4
    68: %tmp4 = fadd <2 x float> %tmp2, %tmp3
    71: ret <2 x float> %tmp4
    [all...]

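In the vldN tests, %tmp2 and %tmp3 are deinterleaved sublanes returned by a NEON load intrinsic, and the add (or fadd) keeps both results live so the full vld2/vld3/vld4 instruction must be selected. A sketch of the surrounding function in the typed-pointer IR of this era; the struct name and alignment argument are from memory, so treat them as approximate:

    %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }

    declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8*, i32) nounwind readonly

    define <8 x i8> @vld2i8(i8* %A) nounwind {
      %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A, i32 8)
      %tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
      %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
      %tmp4 = add <8 x i8> %tmp2, %tmp3   ; line 21 of the excerpt above
      ret <8 x i8> %tmp4
    }
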
vld3.ll
    22: %tmp4 = add <8 x i8> %tmp2, %tmp3
    23: ret <8 x i8> %tmp4
    33: %tmp4 = add <4 x i16> %tmp2, %tmp3
    34: ret <4 x i16> %tmp4
    46: %tmp4 = add <4 x i16> %tmp2, %tmp3
    49: ret <4 x i16> %tmp4
    59: %tmp4 = add <2 x i32> %tmp2, %tmp3
    60: ret <2 x i32> %tmp4
    70: %tmp4 = fadd <2 x float> %tmp2, %tmp3
    71: ret <2 x float> %tmp4
    [all...]

vld4.ll
    21: %tmp4 = add <8 x i8> %tmp2, %tmp3
    22: ret <8 x i8> %tmp4
    33: %tmp4 = add <8 x i8> %tmp2, %tmp3
    36: ret <8 x i8> %tmp4
    47: %tmp4 = add <4 x i16> %tmp2, %tmp3
    48: ret <4 x i16> %tmp4
    59: %tmp4 = add <2 x i32> %tmp2, %tmp3
    60: ret <2 x i32> %tmp4
    70: %tmp4 = fadd <2 x float> %tmp2, %tmp3
    71: ret <2 x float> %tmp4
    [all...]

smul.ll
    14: %tmp4 = mul i32 %tmp2, %tmp3 ; <i32> [#uses=1]
    15: ret i32 %tmp4
    23: %tmp4 = mul i32 %tmp3, %tmp1 ; <i32> [#uses=1]
    24: ret i32 %tmp4

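The multiplies in smul.ll are fed by sign-extended 16-bit halves so instruction selection can form ARM's SMULxy halfword multiplies. A minimal illustration of the kind of pattern involved, not the test's exact body; this shape should select to smulbt:

    define i32 @smul_bt(i32 %a, i32 %b) {
      %lo = trunc i32 %a to i16     ; bottom 16 bits of %a
      %los = sext i16 %lo to i32    ; ...sign-extended
      %his = ashr i32 %b, 16        ; top 16 bits of %b, sign-extended by the shift
      %r = mul i32 %los, %his
      ret i32 %r
    }
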
vbsl-constant.ll
    11: %tmp4 = and <8 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
    13: %tmp7 = or <8 x i8> %tmp4, %tmp6
    25: %tmp4 = and <4 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3>
    27: %tmp7 = or <4 x i16> %tmp4, %tmp6
    39: %tmp4 = and <2 x i32> %tmp1, <i32 3, i32 3>
    41: %tmp7 = or <2 x i32> %tmp4, %tmp6
    54: %tmp4 = and <1 x i64> %tmp1, <i64 3>
    56: %tmp7 = or <1 x i64> %tmp4, %tmp6
    68: %tmp4 = and <16 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
    70: %tmp7 = or <16 x i8> %tmp4, %tmp [all...]

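The line missing from each excerpted pair (e.g. line 12) is an and of the second operand with the complementary mask; the and/and/or triple is the bit-select that ISel turns into NEON's vbsl. The general shape, as a hedged illustration with assumed names:

    ; bit-select with a constant mask: r = (x & m) | (y & ~m)
    define <8 x i8> @bsl(<8 x i8> %x, <8 x i8> %y) {
      %a = and <8 x i8> %x, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
      %b = and <8 x i8> %y, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>   ; -4 = ~3
      %r = or <8 x i8> %a, %b
      ret <8 x i8> %r
    }
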
vuzp.ll
    10: %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
    11: %tmp5 = add <8 x i8> %tmp3, %tmp4
    22: %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    23: %tmp5 = add <4 x i16> %tmp3, %tmp4
    36: %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
    37: %tmp5 = add <16 x i8> %tmp3, %tmp4
    48: %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
    49: %tmp5 = add <8 x i16> %tmp3, %tmp4
    60: %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    61: %tmp5 = add <4 x i32> %tmp3, %tmp4
    [all...]

vzip.ll
    10: %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
    11: %tmp5 = add <8 x i8> %tmp3, %tmp4
    22: %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
    23: %tmp5 = add <4 x i16> %tmp3, %tmp4
    36: %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
    37: %tmp5 = add <16 x i8> %tmp3, %tmp4
    48: %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
    49: %tmp5 = add <8 x i16> %tmp3, %tmp4
    60: %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
    61: %tmp5 = add <4 x i32> %tmp3, %tmp4
    [all...]

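In both files the companion shuffle %tmp3 (not shown) completes the pair: vuzp combines an even-index mask with the odd-index mask excerpted above, while vzip's masks interleave the two inputs. A sketch of the vuzp case with illustrative names, in the era's typed-pointer load syntax:

    define <8 x i8> @uzp(<8 x i8>* %A, <8 x i8>* %B) nounwind {
      %tmp1 = load <8 x i8>* %A
      %tmp2 = load <8 x i8>* %B
      ; even elements of the concatenated inputs...
      %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
      ; ...and the odd elements, as on line 10 above
      %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
      %tmp5 = add <8 x i8> %tmp3, %tmp4   ; keeps both halves live
      ret <8 x i8> %tmp5
    }
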
/external/llvm/test/Feature/

packed_struct.ll
    22: %tmp4 = add i32 %tmp3, %tmp ; <i32> [#uses=1]
    23: %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
    30: %tmp4 = load i32* getelementptr ([2 x <{ i32, i8 }>]* @bara, i32 0, i32 1, i32 0) ; <i32> [#uses=1]
    31: %tmp5 = add i32 %tmp4, %tmp ; <i32> [#uses=1]

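The <{ ... }> braces denote a packed struct laid out with no padding, so each [N x <{ i32, i8 }>] element occupies 5 bytes and the GEP on line 30 addresses an i32 at byte offset 5. A minimal self-contained illustration; the global name is borrowed from the excerpt, its initializer assumed:

    @bara = global [2 x <{ i32, i8 }>] zeroinitializer

    define i32 @second_i32() {
      ; element 1, field 0: byte offset 5, since each packed element is 5 bytes
      %v = load i32* getelementptr ([2 x <{ i32, i8 }>]* @bara, i32 0, i32 1, i32 0)
      ret i32 %v
    }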