/external/llvm/test/CodeGen/ARM/
  vargs_align.ll
    11: %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
    12: store i32 %tmp1, i32* %retval
/external/llvm/test/CodeGen/CPP/
  2009-05-04-CondBr.ll
    12: %tmp1 = load i32, i32* %a.addr ; <i32> [#uses=1]
    13: %cmp = icmp slt i32 %tmp1, 3 ; <i1> [#uses=1]
/external/llvm/test/CodeGen/MSP430/
  2009-09-18-AbsoluteAddr.ll
    14: %tmp1 = load volatile i8, i8* @"\010x0021" ; <i8> [#uses=1]
    15: store i8 %tmp1, i8* %tmp
/external/llvm/test/CodeGen/Mips/
  countleading.ll
    24: %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
    25: ret i32 %tmp1
    41: %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
    42: ret i32 %tmp1
    62: %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
    63: ret i64 %tmp1
    88: %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
    89: ret i64 %tmp1
  gprestore.ll
    23: %tmp1 = load i32, i32* @q, align 4
    25: tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind
/external/llvm/test/CodeGen/PowerPC/
  2006-08-15-SelectionCrash.ll
    9: %tmp1 = load i32, i32* null ; <i32> [#uses=1]
    10: switch i32 %tmp1, label %bb103 [
  2007-01-15-AsmDialect.ll
    12: %tmp1 = sub i32 0, %tmp ; <i32> [#uses=1]
    14: %tmp3 = and i32 %tmp1, %tmp2 ; <i32> [#uses=1]
  mem-rr-addr-mode.ll
    8: %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
    9: %tmp = load <4 x float>, <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
  ppcf128-1.ll
    15: %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
    17: %tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
    37: %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
    39: %tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
    59: %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
    61: %tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
    81: %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
    83: %tmp3 = fdiv ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
  rlwimi-keep-rsh.ll
    14: %tmp1 = and i32 %tmp0, 255
    15: %tmp2 = xor i32 %tmp1, 255
/external/llvm/test/CodeGen/Thumb2/
  carry.ll
    19: %tmp1 = shl i64 %a, 1
    20: %tmp2 = sub i64 %tmp1, %b
  thumb2-rev16.ll
    28: %tmp1 = or i32 %masklo_l8, %masklo_r8
    30: %tmp = or i32 %tmp1, %tmp2
  tls2.ll
    15: %tmp1 = load i32, i32* @i ; <i32> [#uses=1]
    16: ret i32 %tmp1
/external/llvm/test/CodeGen/X86/
  2007-03-16-InlineAsm.ll
    14: %tmp1 = load i32, i32* %A_addr ; <i32> [#uses=1]
    15: %tmp2 = call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 7, i32 %tmp1 ) ; <i32> [#uses=1]
  2007-10-31-extractelement-i64.ll
    13: %tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
    14: %tmp2 = extractelement <2 x i64> %tmp1, i32 0 ; <i64> [#uses=1]
    32: %tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
    33: %tmp2 = extractelement <2 x i64> %tmp1, i32 1 ; <i64> [#uses=1]
    51: %tmp1 = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
    52: %tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
    71: %tmp1 = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
    72: %tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
  2008-10-07-SSEISelBug.ll
    15: %tmp1 = load <4 x float>, <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
    16: store <4 x float> %tmp1, <4 x float>* %retval
  2009-11-16-UnfoldMemOpBug.ll
    12: %tmp1 = getelementptr inbounds [60 x i8], [60 x i8]* %tmp0, i64 0, i64 0
    20: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @str, i64 0, i64 0), i64 28, i32 1, i1 false)
  2010-01-07-ISelBug.ll
    19: %tmp1.i1.i = call i32 @llvm.bswap.i32(i32 %tmp173) nounwind ; <i32> [#uses=1]
    20: store i32 %tmp1.i1.i, i32* undef, align 8
  2010-05-07-ldconvert.ll
    12: %tmp1 = load i32, i32* %r ; <i32> [#uses=1]
    13: %tobool = icmp ne i32 %tmp1, 0 ; <i1> [#uses=1]
  2011-03-09-Physreg-Coalescing.ll
    15: %tmp1 = load i8, i8* %sp, align 1
    16: %div = udiv i8 %tmp1, 10
  2011-06-01-fildll.ll
    10: %tmp1 = load i64, i64* %x, align 4
    12: %conv = sitofp i64 %tmp1 to float
  avx-cvt.ll
    79: %tmp1 = load i64, i64* %e, align 8
    80: %conv = sitofp i64 %tmp1 to double
    89: %tmp1 = load i32, i32* %e, align 4
    90: %conv = sitofp i32 %tmp1 to double
    99: %tmp1 = load i32, i32* %e, align 4
    100: %conv = sitofp i32 %tmp1 to float
    109: %tmp1 = load i64, i64* %e, align 8
    110: %conv = sitofp i64 %tmp1 to float
  const-base-addr.ll
    15: %tmp1 = load i32, i32* %addr1
    20: %tmp4 = add i32 %tmp1, %tmp2
  dagcombine-buildvector.ll
    20: %tmp1 = load <4 x i16>, <4 x i16>* %src
    21: %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  expand-opaque-const.ll
    14: %tmp1 = load i64, i64* %op1
    17: %tmp3 = lshr i64 %tmp1, %tmp