/external/llvm/test/CodeGen/AMDGPU/ |
reorder-stores.ll |
    12  %tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
    13  store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
    26  %tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
    27  store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
    46  %tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
    47  store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
    61  %tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
    63  %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
|
private-memory-atomics.ll |
    15  %tmp4 = atomicrmw add i32* %tmp3, i32 7 acq_rel
    16  store i32 %tmp4, i32 addrspace(1)* %out
    28  %tmp4 = cmpxchg i32* %tmp3, i32 0, i32 1 acq_rel monotonic
    29  %val = extractvalue { i32, i1 } %tmp4, 0
|
/external/llvm/test/CodeGen/ARM/ |
2008-03-05-SxtInRegBug.ll |
    8  %tmp4.i.i = icmp slt i8 %tmp3.i.i, 0 ; <i1> [#uses=1]
    9  br i1 %tmp4.i.i, label %bb2, label %bb3
|
fnmul.ll |
    11  %tmp4 = fmul double %tmp2, %b ; <double> [#uses=1]
    12  ret double %tmp4
|
ifcvt6.ll |
    8  %tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
    9  %tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
|
uxtb.ll |
    36  %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
    37  %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
    45  %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
    46  %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
    61  %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
    62  %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
    70  %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
    71  %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
|
v1-constant-fold.ll |
    12  %tmp4 = add <4 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1>
    14  call void @bar(<4 x i32> %tmp4)
|
vld2.ll |
    21  %tmp4 = add <8 x i8> %tmp2, %tmp3
    22  ret <8 x i8> %tmp4
    33  %tmp4 = add <4 x i16> %tmp2, %tmp3
    34  ret <4 x i16> %tmp4
    44  %tmp4 = add <2 x i32> %tmp2, %tmp3
    45  ret <2 x i32> %tmp4
    55  %tmp4 = fadd <2 x float> %tmp2, %tmp3
    56  ret <2 x float> %tmp4
    68  %tmp4 = fadd <2 x float> %tmp2, %tmp3
    71  ret <2 x float> %tmp4
    [all...]
/external/llvm/test/CodeGen/NVPTX/ |
vector-select.ll |
    9  %tmp4 = load <2 x i32>, <2 x i32> addrspace(1)* %def_a
    12  %0 = icmp sge <2 x i32> %tmp4, zeroinitializer
|
/external/llvm/test/CodeGen/PowerPC/ |
2007-05-30-dagcombine-miscomp.ll |
    10  %tmp4 = shl i8 %tmp23, 1 ; <i8> [#uses=1]
    11  %tmp5 = and i8 %tmp4, 2 ; <i8> [#uses=1]
|
and-branch.ll |
    7  %tmp4 = and i1 %tmp3, %tmp ; <i1> [#uses=1]
    8  br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock
|
/external/llvm/test/CodeGen/Thumb2/ |
thumb2-ldr_post.ll |
    7  %tmp4 = sub i32 %tmp1, 8 ; <i32> [#uses=1]
    8  %tmp5 = mul i32 %tmp4, %tmp3 ; <i32> [#uses=1]
|
/external/llvm/test/CodeGen/X86/ |
2006-05-17-VectorArg.ll |
    9  %tmp4 = fdiv float 1.000000e+00, %tmp3 ; <float> [#uses=1]
    10  %tmp11 = insertelement <4 x float> zeroinitializer, float %tmp4, i32 3 ; <<4 x float>> [#uses=1]
|
2007-04-24-Huge-Stack.ll |
    13  %tmp4 = call i8* @md5_finish_ctx( %struct.md5_ctx* %ctx, i8* %resblock ) ; <i8*> [#uses=1]
    14  ret i8* %tmp4
|
2007-04-27-InlineAsm-IntMemInput.ll |
    9  %tmp4 = tail call i32 asm "bsrl $1, $0", "=r,ro,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 10 ) ; <i32> [#uses=1]
    10  ret i32 %tmp4
|
2007-07-18-Vector-Extract.ll |
    15  %tmp4 = load i64, i64* %tmp2.gep ; <i64> [#uses=1]
    16  ret i64 %tmp4
|
avx1-logical-load-folding.ll |
    11  %tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
    12  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
    25  %tmp4 = or <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
    26  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
    39  %tmp4 = xor <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
    40  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
    52  %tmp4 = xor <8 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
    53  %tmp5 = and <8 x i32> %tmp4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
|
inline-asm-x-scalar.ll |
    20  %tmp4 = fsub float %tmp1, 0x3810000000000000 ; <float> [#uses=1]
    21  tail call void asm sideeffect "", "x,~{dirflag},~{fpsr},~{flags}"( float %tmp4 )
|
tailcallbyval.ll |
    17  %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval %a )
    18  ret i32 %tmp4
|
x86-64-gv-offset.ll |
    9  %tmp4 = load double, double* getelementptr (%struct.x, %struct.x* @X, i32 0, i32 1), align 8 ; <double> [#uses=1]
    10  tail call void @t( float %tmp2, double %tmp4 ) nounwind
|
/external/llvm/test/Transforms/GVN/ |
2008-02-12-UndefLoad.ll |
    13  %tmp4 = and i32 %tmp3, -21 ; <i32> [#uses=1]
    14  store i32 %tmp4, i32* %tmp1, align 4
|
/external/llvm/test/Transforms/Reassociate/ |
secondary.ll |
    17  %tmp4 = add i32 %tmp3, 2014710503
    19  %tmp6 = sub i32 %tmp4, %tmp5
|
/external/llvm/test/CodeGen/AArch64/ |
aarch64-DAGCombine-findBetterNeighborChains-crash.ll |
    20  %tmp4 = bitcast i8* %tmp to <4 x float>*
    21  store volatile <4 x float> zeroinitializer, <4 x float>* %tmp4
    35  %tmp4 = bitcast i8* %tmp to <4 x float>*
    36  store <4 x float> zeroinitializer, <4 x float>* %tmp4
|
arm64-uzp.ll |
    11  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
    12  %tmp5 = add <8 x i8> %tmp3, %tmp4
    24  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    25  %tmp5 = add <4 x i16> %tmp3, %tmp4
    37  %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
    38  %tmp5 = add <16 x i8> %tmp3, %tmp4
    50  %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
    51  %tmp5 = add <8 x i16> %tmp3, %tmp4
    63  %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    64  %tmp5 = add <4 x i32> %tmp3, %tmp4
    [all...]
arm64-zip.ll |
    11  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
    12  %tmp5 = add <8 x i8> %tmp3, %tmp4
    24  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
    25  %tmp5 = add <4 x i16> %tmp3, %tmp4
    37  %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
    38  %tmp5 = add <16 x i8> %tmp3, %tmp4
    50  %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
    51  %tmp5 = add <8 x i16> %tmp3, %tmp4
    63  %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
    64  %tmp5 = add <4 x i32> %tmp3, %tmp4
    [all...]