| /external/llvm/test/CodeGen/ARM/ |
| fast-isel-shift-materialize.ll |
|   19: %tmp4 = lshr i32 %tmp3, 2
|   21: call void @foo(i32 %tmp10, i32 %tmp4)
|
| ldr_pre.ll |
|   21: %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1]
|   22: %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1]
|
| pr13249.ll |
|   9: %tmp4 = load i8, i8* %tmp, align 1
|   14: %tmp8 = phi i8 [ %tmp14, %bb13 ], [ %tmp4, %bb3 ]
|
| /external/llvm/test/CodeGen/Hexagon/vect/ |
| vect-splat.ll |
|   10: %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
|   11: %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
|
| /external/llvm/test/CodeGen/PowerPC/ |
| 2007-01-15-AsmDialect.ll |
|   15: %tmp4 = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"( i32 %tmp3 ) ; <i32> [#uses=1]
|   16: store i32 %tmp4, i32* %ctz_c
|
| and_sext.ll |
|   16: %tmp4 = ashr i32 %tmp2, 1
|   17: %tmp5 = trunc i32 %tmp4 to i16
|
| eqv-andc-orc-nor.ll |
|   77: %tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
|   78: %tmp4.upgrd.3 = bitcast <4 x i32> %tmp4 to <4 x float> ; <<4 x float>> [#uses=1]
|   79: store <4 x float> %tmp4.upgrd.3, <4 x float>* %P
|   88: %tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
|   89: %tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4 ; <<4 x i32>> [#uses=1]
|   90: %tmp4.upgrd.6 = bitcast <4 x i32> %tmp3 to <4 x float> ; <<4 x float>> [#uses=1]
|   91: store <4 x float> %tmp4.upgrd.6, <4 x float>* %P
|
| mem-rr-addr-mode.ll |
|   11: %tmp4 = load <4 x float>, <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
|   12: %tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
|
| ppcf128-1.ll |
|   19: %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
|   20: store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
|   41: %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
|   42: store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
|   63: %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
|   64: store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
|   85: %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
|   86: store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
|
| rlwimi-keep-rsh.ll |
|   17: %tmp4 = ashr i32 %a, 4
|   18: %tmp5 = or i32 %tmp3, %tmp4
|
| rlwinm2.ll |
|   16: %tmp4 = or i32 %tmp1, %tmp3 ; <i32> [#uses=1]
|   17: %tmp6 = and i32 %tmp4, 127 ; <i32> [#uses=1]
|
| vec_splat.ll |
|   15: %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
|   16: %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
|   26: %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
|   27: %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
|   65: %tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
|   67: %tmp4.u = bitcast <16 x i8> %tmp4 to <16 x i8> ; <<16 x i8>> [#uses=1]
|   68: store <16 x i8> %tmp4.u, <16 x i8>* %A
|
| /external/llvm/test/CodeGen/Thumb2/ |
| thumb2-ldr_pre.ll |
|   16: %tmp4 = sub i32 %tmp1, %b ; <i32> [#uses=1]
|   17: %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1]
|
| thumb2-rev.ll |
|   19: %tmp4 = shl i16 %tmp3, 8
|   20: %tmp5 = or i16 %tmp2, %tmp4
|
| thumb2-uxtb.ll |
|   72: %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
|   73: %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
|   87: %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
|   88: %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
|   115: %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
|   116: %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
|   137: %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
|   138: %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
|
| /external/llvm/test/CodeGen/X86/ |
| 2006-05-08-CoalesceSubRegClass.ll |
|   15: %tmp4 = shl i32 %tmp2, %shift.upgrd.2 ; <i32> [#uses=1]
|   16: store i32 %tmp4, i32* @B
|
| 2007-02-04-OrAddrMode.ll |
|   12: %tmp4 = bitcast float* %tmp3 to i8* ; <i8*> [#uses=1]
|   13: %ctg2 = getelementptr i8, i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
|
| 2007-03-16-InlineAsm.ll |
|   19: %tmp4 = load i32, i32* %tmp ; <i32> [#uses=1]
|   20: store i32 %tmp4, i32* %retval
|
| 2007-05-15-maskmovq.ll |
|   8: %tmp4 = bitcast <1 x i64> %mask1 to x86_mmx ; <x86_mmx> [#uses=1]
|   10: tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
|
| 2007-07-03-GR64ToVR64.ll |
|   11: %tmp4 = bitcast <1 x i64> %B to x86_mmx ; <<4 x i16>> [#uses=1]
|   13: %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp6, x86_mmx %tmp4 ) ; <x86_mmx> [#uses=1]
|
| 2008-02-06-LoadFoldingBug.ll |
|   7: %tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1]
|   8: call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind
|
| 2009-11-16-UnfoldMemOpBug.ll |
|   22: %tmp4 = icmp eq i32 %tmp3, %count
|   23: br i1 %tmp4, label %bb2, label %bb1
|
| and-or-fold.ll |
|   10: %tmp4 = shl i32 %x, 16
|   11: %tmp5 = and i32 %tmp4, 16711680
|
| avx512-unsafe-fp-math.ll |
|   18: %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
|   19: ret <16 x float> %tmp4;
|   35: %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
|   36: ret <16 x float> %tmp4;
|   52: %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
|   53: ret <8 x double> %tmp4;
|   69: %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
|   70: ret <8 x double> %tmp4;
|
| const-base-addr.ll |
|   20: %tmp4 = add i32 %tmp1, %tmp2
|   21: %tmp5 = add i32 %tmp3, %tmp4
|