/external/llvm/test/CodeGen/BPF/loops.ll
  14: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  15: %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  36: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  37: %add = sub i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  58: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  59: %add = or i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  80: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  81: %add = xor i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  102: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  103: %add = and i16 %tmp4, %sum.09 ; <i16> [#uses=2
  [all...]

/external/llvm/test/CodeGen/MSP430/postinc.ll
  16: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  17: %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  38: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  39: %add = sub i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  60: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  61: %add = or i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  82: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  83: %add = xor i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  104: %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
  105: %add = and i16 %tmp4, %sum.09 ; <i16> [#uses=2
  [all...]

/external/swiftshader/third_party/LLVM/test/CodeGen/MSP430/postinc.ll
  16: %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
  17: %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  38: %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
  39: %add = sub i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  60: %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
  61: %add = or i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  82: %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
  83: %add = xor i16 %tmp4, %sum.09 ; <i16> [#uses=2]
  104: %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
  105: %add = and i16 %tmp4, %sum.09 ; <i16> [#uses=2
  [all...]

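The three entries above are the same reduction-loop test in add/sub/or/xor/and variants (the swiftshader copy predates the typed `load i16, i16*` syntax). A hedged C sketch of the loop shape such tests are reduced from; the function and variable names are illustrative, not taken from the test files:

    #include <stddef.h>

    /* Each test variant swaps += for -=, |=, ^=, or &=. On MSP430 the
     * a[i] load should lower to a post-increment addressing mode. */
    unsigned short reduce_add(const unsigned short *a, size_t n) {
      unsigned short sum = 0;            /* corresponds to %sum.09 */
      for (size_t i = 0; i < n; ++i)
        sum = sum + a[i];                /* %tmp4 = load; %add = add */
      return sum;
    }
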
/external/webrtc/webrtc/common_audio/signal_processing/downsample_fast_mips.c
  28: int32_t tmp1, tmp2, tmp3, tmp4, factor_2;
  60: "lwl %[tmp4], 7(%[p_coefs]) \n\t"
  64: "lwr %[tmp4], 4(%[p_coefs]) \n\t"
  68: "dpa.w.ph $ac0, %[tmp3], %[tmp4] \n\t"
  95: [tmp4] "=&r" (tmp4), [p_data_in] "=&r" (p_data_in),
  121: "lh %[tmp4], 2(%[p_coefs]) \n\t"
  124: "mul %[tmp3], %[tmp3], %[tmp4] \n\t"
  155: [tmp4] "=&r" (tmp4), [p_data_in] "=&r" (p_data_in), [k] "=&r" (k)
  [all...]

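The MIPS routine above hand-optimizes a multiply-accumulate kernel: lwl/lwr perform unaligned word loads of coefficient pairs and dpa.w.ph is a packed dot-product-accumulate. A hedged plain-C sketch of the inner product such code replaces; names and the Q-format scaling are illustrative, not copied from the WebRTC source:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: the real downsampler also handles delays,
     * strides, and saturation. Each dpa.w.ph above accumulates two of
     * these int16 products at once. */
    static int32_t dot_q12(const int16_t *data, const int16_t *coefs,
                           size_t len) {
      int32_t acc = 1 << 11;                   /* rounding bias, illustrative */
      for (size_t j = 0; j < len; ++j)
        acc += (int32_t)coefs[j] * data[j];
      return acc >> 12;
    }
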
/external/llvm/test/CodeGen/ARM/vldlane.ll
  107: %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
  108: %tmp5 = add <8 x i8> %tmp3, %tmp4
  120: %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
  121: %tmp5 = add <4 x i16> %tmp3, %tmp4
  132: %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
  133: %tmp5 = add <2 x i32> %tmp3, %tmp4
  146: %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
  147: %tmp5 = add <2 x i32> %tmp3, %tmp4
  160: %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
  161: %tmp5 = fadd <2 x float> %tmp3, %tmp4
  [all...]

/external/llvm/test/CodeGen/ARM/vbsl.ll
  11: %tmp4 = and <8 x i8> %tmp1, %tmp2
  14: %tmp7 = or <8 x i8> %tmp4, %tmp6
  24: %tmp4 = and <4 x i16> %tmp1, %tmp2
  27: %tmp7 = or <4 x i16> %tmp4, %tmp6
  37: %tmp4 = and <2 x i32> %tmp1, %tmp2
  40: %tmp7 = or <2 x i32> %tmp4, %tmp6
  50: %tmp4 = and <1 x i64> %tmp1, %tmp2
  53: %tmp7 = or <1 x i64> %tmp4, %tmp6
  63: %tmp4 = and <16 x i8> %tmp1, %tmp2
  66: %tmp7 = or <16 x i8> %tmp4, %tmp
  [all...]

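vbsl.ll checks that the generic bitwise-select idiom, an and of the mask with one operand or-ed with an and of the inverted mask with the other, is matched to a single VBSL instruction. A hedged scalar C sketch of the pattern the tests apply per vector lane:

    #include <stdint.h>

    /* Picks each bit from a where mask is 1, from b where mask is 0.
     * %tmp4 above is the first and, %tmp7 the final or. */
    uint8_t bit_select(uint8_t mask, uint8_t a, uint8_t b) {
      return (uint8_t)((mask & a) | (~mask & b));
    }
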
/external/llvm/test/CodeGen/ARM/vlddup.ll
  73: %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
  74: %tmp5 = add <8 x i8> %tmp2, %tmp4
  87: %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
  88: %tmp5 = add <4 x i16> %tmp2, %tmp4
  102: %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
  103: %tmp5 = add <4 x i16> %tmp2, %tmp4
  117: %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
  118: %tmp5 = add <2 x i32> %tmp2, %tmp4
  138: %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
  141: %tmp7 = add <8 x i8> %tmp2, %tmp4
  [all...]

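In vlddup.ll, a single loaded scalar is broadcast to every lane (the shufflevector with an all-zero mask), which should lower to a load-and-duplicate instruction. A hedged scalar sketch of the splat, with illustrative names:

    #include <stdint.h>

    /* Broadcast *p into all eight lanes of out, as the
     * zeroinitializer shuffle mask above does. */
    void splat8(const int8_t *p, int8_t out[8]) {
      for (int i = 0; i < 8; ++i)
        out[i] = *p;
    }
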
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/vldlane.ll
  95: %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
  96: %tmp5 = add <8 x i8> %tmp3, %tmp4
  108: %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
  109: %tmp5 = add <4 x i16> %tmp3, %tmp4
  120: %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
  121: %tmp5 = add <2 x i32> %tmp3, %tmp4
  134: %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
  135: %tmp5 = add <2 x i32> %tmp3, %tmp4
  148: %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
  149: %tmp5 = fadd <2 x float> %tmp3, %tmp4
  [all...]

/external/libvpx/libvpx/vpx_dsp/mips/fwd_txfm_msa.c
  30: v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  51: ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
  52: FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
  53: tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  54: ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
  149: v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  168: in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
  171: FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
  172: tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  179: TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4
  [all...]

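In the MSA code above, ADD4(in4, in11, in5, in10, ...) builds the even half of the first butterfly stage of a 16-point forward DCT: pairwise sums of samples mirrored around the center, which then feed FDCT8x16_EVEN. A hedged scalar sketch of that stage, with illustrative names:

    #include <stdint.h>

    /* tmp[i] = in[i] + in[15 - i]; tmp4..tmp7 above are i = 4..7.
     * The odd half of the transform would use the differences. */
    void fdct16_even_stage(const int16_t in[16], int16_t tmp[8]) {
      for (int i = 0; i < 8; ++i)
        tmp[i] = (int16_t)(in[i] + in[15 - i]);
    }
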
/external/llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
  31: %d.1.01 = phi i64 [ %tmp4.i, %bb9 ], [ 0, %entry ] ; <i64> [#uses=1]
  32: %tmp4.i = add i64 %d.1.01, 1 ; <i64> [#uses=2]
  33: %tmp12 = add i64 %accum.03, %tmp4.i ; <i64> [#uses=2]
|
/external/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
  30: %tmp4 = icmp slt i32 %tmp3, %N
  33: %tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
  35: ; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6

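min-max-exprs.ll exercises the compare-plus-select idiom that ScalarEvolution models as a signed min/max expression. A hedged scalar sketch of the pattern (the test's operand types differ slightly):

    /* %tmp4 = icmp slt ...; %tmp9 = select i1 %tmp4, ... is the
     * canonical signed-minimum shape. */
    long smin(long a, long b) {
      return a < b ? a : b;
    }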
|
/external/llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll
  33: %tmp4 = getelementptr inbounds %struct._sem, %struct._sem* %sem, i64 0, i32 1, i32 1
  43: %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgl $2,$1 ;\09 sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_int", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %tmp4, i32 undef, i32 undef, i32* %tmp4) nounwind, !srcloc !0

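The inline asm in this test is a hand-rolled compare-and-set: lock cmpxchgl does the conditional swap and sete captures the ZF result as the success flag. A hedged C11 sketch of the operation itself (not the test's code; names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Returns nonzero iff *p matched expect and was replaced by src,
     * mirroring the sete result of the asm above. */
    int cmpset_u32(_Atomic uint32_t *p, uint32_t expect, uint32_t src) {
      return atomic_compare_exchange_strong(p, &expect, src);
    }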
|
/external/llvm/test/CodeGen/X86/pr2849.ll
  25: %tmp4 = getelementptr %struct.HashEntry, %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
  26: %tmp7 = load i8*, i8** %tmp4, align 8
  34: call fastcc void @xlprint(i8** %tmp4, i8* %tmp7, i8** %tmp15)
|
/external/llvm/test/CodeGen/X86/fold-and-shift.ll
  12: %tmp4 = and i32 %tmp2, 1020
  13: %tmp7 = getelementptr i8, i8* %X, i32 %tmp4
  28: %tmp4 = and i32 %tmp2, 131070
  29: %tmp7 = getelementptr i16, i16* %X, i32 %tmp4

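The masks in fold-and-shift.ll are chosen so the AND can migrate across the address computation: 1020 == 255 << 2 and 131070 == 65535 << 1, so the shift can fold into an x86 scaled addressing mode. A hedged sketch of the first pattern, with illustrative names:

    #include <stdint.h>

    /* X + (i & 1020) == X + 4 * ((i >> 2) & 255), so the scale-by-4
     * can live in the addressing mode instead of a separate shift. */
    int32_t load_elem(const char *X, uint32_t i) {
      return *(const int32_t *)(X + (i & 1020u));
    }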
|
/external/llvm/test/Transforms/IndVarSimplify/loop_evaluate_1.ll
  38: %tmp3 = phi i32 [ %tmp4, %bb1 ], [ %arg, %bb ]
  39: %tmp4 = add i32 %tmp3, -2
  41: %tmp6 = icmp ugt i32 %tmp4, 10

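loop_evaluate_1.ll runs a countdown by 2 guarded by an unsigned comparison, a loop IndVarSimplify should evaluate to a closed form. A hedged C rendering of the loop shape, with illustrative names:

    /* %tmp3 = phi; %tmp4 = add %tmp3, -2; %tmp6 = icmp ugt %tmp4, 10 */
    unsigned countdown(unsigned arg) {
      unsigned n = arg;
      do
        n -= 2;
      while (n > 10);
      return n;
    }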
|
/external/llvm/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
  25: %tmp4 = srem i32 %tmp3857, 1000 ; <i32> [#uses=2]
  26: %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=1]
  30: %tmp34 = getelementptr i32, i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]

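2008-06-24-StackRestore.ll guards the pairing of stacksave/stackrestore around a per-iteration dynamic alloca; the srem/add pair above computes its size. A hedged C shape that produces that kind of IR, with illustrative names:

    /* Each iteration allocates a VLA of i % 1000 + 1 ints, so the
     * frontend emits a stacksave/stackrestore pair that InstCombine
     * must not mis-eliminate. */
    void loop_vla(int n, int *out) {
      for (int i = 0; i < n; ++i) {
        int len = i % 1000 + 1;   /* %tmp4 = srem; %tmp5 = add */
        int buf[len];
        buf[len - 1] = i;         /* gep into the alloca, as %tmp34 */
        out[i] = buf[len - 1];
      }
    }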
|
/external/llvm/test/Transforms/LICM/pr23608.ll
  28: %tmp4 = ptrtoint i32* %f_iblock to i64
  29: %tmp8 = inttoptr i64 %tmp4 to i32*
  30: %tobool = icmp eq i64 %tmp4, 0
|
/external/swiftshader/third_party/LLVM/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
  31: %d.1.01 = phi i64 [ %tmp4.i, %bb9 ], [ 0, %entry ] ; <i64> [#uses=1]
  32: %tmp4.i = add i64 %d.1.01, 1 ; <i64> [#uses=2]
  33: %tmp12 = add i64 %accum.03, %tmp4.i ; <i64> [#uses=2]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/2010-09-16-asmcrash.ll
  33: %tmp4 = getelementptr inbounds %struct._sem* %sem, i64 0, i32 1, i32 1
  43: %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgl $2,$1 ;\09 sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_int", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %tmp4, i32 undef, i32 undef, i32* %tmp4) nounwind, !srcloc !0
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/pr2849.ll
  25: %tmp4 = getelementptr %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
  26: %tmp7 = load i8** %tmp4, align 8
  34: call fastcc void @xlprint(i8** %tmp4, i8* %tmp7, i8** %tmp15)
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/stack-align.ll
  13: %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
  14: volatile store double %tmp4, double* %P
  19: %tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
  25: %tmp4 = srem i32 %tmp3857, 1000 ; <i32> [#uses=2]
  26: %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=1]
  30: %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
|
/external/llvm/test/CodeGen/AArch64/arm64-vshift.ll
  649: %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  650: ret <16 x i8> %tmp4
  659: %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  660: ret <8 x i16> %tmp4
  669: %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  670: ret <4 x i32> %tmp4
  711: %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  712: ret <16 x i8> %tmp4
  722: %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  723: ret <8 x i16> %tmp4
  [all...]

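The shufflevectors above, with identity masks 0..15 (or 0..7, 0..3), concatenate an existing low half with a freshly computed high half, the pattern behind AArch64 high-half narrowing instructions such as shrn2. A hedged scalar sketch of the concat, with illustrative names:

    #include <stdint.h>

    /* out = lo followed by hi, i.e. the <16 x i32> <0 ... 15>
     * shuffle of two 8-lane vectors above. */
    void concat_halves(const int8_t lo[8], const int8_t hi[8],
                       int8_t out[16]) {
      for (int i = 0; i < 8; ++i) {
        out[i] = lo[i];
        out[i + 8] = hi[i];
      }
    }
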
/external/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
  24: %tmp4 = load double, double* %tmp3, align 1 ; <double> [#uses=1]
  26: store double %tmp4, double* %tmp5, align 1
  42: %tmp4 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
  43: %tmp5 = load double, double* %tmp4, align 1 ; <double> [#uses=1]

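2007-09-08-unaligned.ll reads and writes a double stored at offset 1 inside a packed global, so every access carries align 1. A hedged C source for the same layout (GCC/Clang packed-attribute syntax; names are illustrative):

    /* <{ i8, double }> in the IR corresponds to a packed struct; the
     * backend must emit align-1 loads and stores for the double. */
    struct __attribute__((packed)) cell { char tag; double val; };

    struct cell u, v;

    void copy_val(void) {
      v.val = u.val;   /* load double ... align 1; store ... align 1 */
    }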
|
/external/llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll
  20: %tmp4 = getelementptr double, double* %arg, i32 %arg3
  21: %tmp5 = load double, double* %tmp4, align 4
  61: %tmp4 = getelementptr double, double* %arg, i32 %arg3
  62: %tmp5 = load double, double* %tmp4, align 4

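irreducible-cfg.ll exercises a cycle with two entry points, which has no single loop header and so cannot be expressed with structured WebAssembly loops without a fixup pass. A hedged C sketch of how such a CFG arises; the names are illustrative, not the test's source:

    /* Jumping into the middle of the loop gives the cycle a second
     * entry, making the CFG irreducible. */
    void irreducible(double *arg, int arg2, int arg3) {
      if (arg2)
        goto mid;
      for (;;) {
        arg[arg3] += 1.0;   /* the %tmp4 gep / %tmp5 load above */
    mid:
        if (--arg3 < 0)
          break;
      }
    }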
|