/external/llvm/test/CodeGen/ARM/ |
vtbl.ll |
   35  %tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
   36  ret <8 x i8> %tmp6
   47  %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
   48  %tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
   70  %tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
   71  ret <8 x i8> %tmp6
   82  %tmp6 = load <8 x i8>, <8 x i8>* %C
   83  %tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
   95  %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
   97  %tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7 [all...]

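The vtbl/vtbx intrinsics matched above are NEON byte-table lookups. For reference, a scalar C model of their semantics (the function name and layout are illustrative, not taken from the tests): vtbl writes zero for an out-of-range index, while vtbx leaves the destination byte unchanged.

    #include <stdint.h>

    /* Scalar model of an 8-byte NEON vtbl/vtbx lookup over a table of
       len bytes (len = 8, 16, 24, or 32 for vtbl1..vtbl4 / vtbx1..vtbx4). */
    void vtbl_model(uint8_t dst[8], const uint8_t *table, int len,
                    const uint8_t idx[8], int is_vtbx)
    {
        for (int i = 0; i < 8; i++) {
            if (idx[i] < len)
                dst[i] = table[idx[i]];   /* in-range index: table byte */
            else if (!is_vtbx)
                dst[i] = 0;               /* vtbl: out-of-range gives 0 */
            /* vtbx: out-of-range leaves dst[i] unchanged */
        }
    }
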
vbsl.ll |
   13  %tmp6 = and <8 x i8> %tmp5, %tmp3
   14  %tmp7 = or <8 x i8> %tmp4, %tmp6
   26  %tmp6 = and <4 x i16> %tmp5, %tmp3
   27  %tmp7 = or <4 x i16> %tmp4, %tmp6
   39  %tmp6 = and <2 x i32> %tmp5, %tmp3
   40  %tmp7 = or <2 x i32> %tmp4, %tmp6
   52  %tmp6 = and <1 x i64> %tmp5, %tmp3
   53  %tmp7 = or <1 x i64> %tmp4, %tmp6
   65  %tmp6 = and <16 x i8> %tmp5, %tmp3
   66  %tmp7 = or <16 x i8> %tmp4, %tmp6
   [all...]

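Each and/or pair in vbsl.ll is the generic bit-select pattern that the ARM backend is expected to fold into a single VBSL instruction; a plausible reading of the matched lines is that %tmp5 holds the inverted mask. A minimal scalar C model (names illustrative):

    #include <stdint.h>

    /* Bitwise select: take bits of a where mask is 1, bits of b where
       mask is 0. The tests repeat this pattern per element type. */
    uint64_t bsl64(uint64_t mask, uint64_t a, uint64_t b)
    {
        return (mask & a) | (~mask & b);
    }
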
2012-04-10-DAGCombine.ll |
   11  %tmp6 = fadd float %tmp, -1.500000e+01
   12  %tmp7 = fdiv float %tmp6, 2.000000e+01

ifcvt7.ll |
   14  %tmp6 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
   18  %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]

zextload_demandedbits.ll |
   25  %tmp6 = shl i32 %tmp5, 20
   26  %tmp7 = ashr exact i32 %tmp6, 20

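The shl-by-20 / ashr-exact-by-20 pair matched above is the usual idiom for sign-extending a narrow (here 12-bit) field inside a 32-bit value. In C terms, a sketch (relies on arithmetic right shift of signed integers, which mainstream compilers provide):

    #include <stdint.h>

    /* Sign-extend the low 12 bits of x: shift left so bit 11 becomes
       the sign bit, then arithmetic-shift back down. */
    int32_t sext12(uint32_t x)
    {
        return (int32_t)(x << 20) >> 20;
    }
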
/frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/ |
h264bsd_interpolate_ver_half.s |
   56  tmp6 RN 9    label
  103  ADD tmp6, y0, partH ;// (y0+partHeight)
  104  ADD tmp6, tmp6, #5 ;// (y0+partH+5)
  106  CMP tmp6, height
  134  MLA tmp6, width, y0, x0 ;// y0*width+x0
  135  ADD ref, ref, tmp6 ;// ref += y0*width+x0
  155  LDR tmp6, [ref], width ;// |t4|t3|t2|t1|
  166  UXTAB16 tmpa, tmpa, tmp6 ;// 16+20(G+M)+A+T
  182  UXTAB16 tmpa, tmpa, tmp6, ROR #8 ;// 16+20(G+M)+A+ [all...]

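The UXTAB16 accumulations above build up H.264's 6-tap (1, -5, 20, 20, -5, 1) vertical half-pel filter with rounding, which is what the "16+20(G+M)+..." comments describe. A scalar C sketch of one output sample (function names are illustrative):

    #include <stdint.h>

    static uint8_t clip255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

    /* One vertical half-pel sample from six vertically adjacent input
       pixels a..f: 6-tap filter, +16 rounding, >>5 normalization. */
    uint8_t halfpel_v(uint8_t a, uint8_t b, uint8_t c,
                      uint8_t d, uint8_t e, uint8_t f)
    {
        int acc = a - 5*b + 20*c + 20*d - 5*e + f;
        return clip255((acc + 16) >> 5);
    }
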
/external/libjpeg-turbo/ |
jfdctflt.c |
   61  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
   74  tmp6 = dataptr[1] - dataptr[6];
   97  tmp11 = tmp5 + tmp6;
   98  tmp12 = tmp6 + tmp7;
  124  tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
  147  tmp11 = tmp5 + tmp6;
  148  tmp12 = tmp6 + tmp7;

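The tmp values in jfdctflt.c come from the first butterfly stage of the float forward DCT: mirrored sums feed the even half, mirrored differences (tmp4..tmp7) the odd half, and tmp6 = dataptr[1] - dataptr[6] is one of those differences. A sketch of the stage for one 8-sample row, inferred from the matched lines:

    /* First butterfly of an 8-point FDCT row: sum[i] goes to the even
       part, diff[i] (= tmp7, tmp6, tmp5, tmp4 for i = 0..3) to the
       odd part. */
    void fdct_butterfly(const float d[8], float sum[4], float diff[4])
    {
        for (int i = 0; i < 4; i++) {
            sum[i]  = d[i] + d[7 - i];
            diff[i] = d[i] - d[7 - i];
        }
    }
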
jidctflt.c |
   75  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
  147  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5] * _0_125);
  150  z13 = tmp6 + tmp5; /* phase 6 */
  151  z10 = tmp6 - tmp5;
  162  tmp6 = tmp12 - tmp7; /* phase 2 */
  163  tmp5 = tmp11 - tmp6;
  168  wsptr[DCTSIZE*1] = tmp1 + tmp6;
  169  wsptr[DCTSIZE*6] = tmp1 - tmp6;
  220  tmp6 = tmp12 - tmp7;
  221  tmp5 = tmp11 - tmp6;
  [all...]

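The "phase 2" matches in jidctflt.c form a serial dependency chain in the float AAN inverse DCT: tmp6 feeds tmp5, and tmp1 +/- tmp6 produces the row-1 and row-6 outputs. A sketch reconstructed around the matched lines (the inputs come from earlier stages of the kernel and are assumed here):

    /* Odd-part "phase 2" of the float IDCT column pass: each
       difference feeds the next, then tmp6 pairs with tmp1 for the
       row 1/6 outputs; tmp5 feeds later rows (not shown). */
    void idct_phase2(float tmp1, float tmp7, float tmp11, float tmp12,
                     float *row1, float *row6, float *tmp5_out)
    {
        float tmp6 = tmp12 - tmp7;   /* phase 2 */
        float tmp5 = tmp11 - tmp6;
        *row1 = tmp1 + tmp6;
        *row6 = tmp1 - tmp6;
        *tmp5_out = tmp5;
    }
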
/external/opencv3/3rdparty/libjpeg/ |
jfdctflt.c |
   62  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
   79  tmp6 = (FAST_FLOAT) (GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]));
  103  tmp11 = tmp5 + tmp6;
  104  tmp12 = tmp6 + tmp7;
  130  tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
  153  tmp11 = tmp5 + tmp6;
  154  tmp12 = tmp6 + tmp7;

jidctflt.c |
   73  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
  143  tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  146  z13 = tmp6 + tmp5; /* phase 6 */
  147  z10 = tmp6 - tmp5;
  158  tmp6 = tmp12 - tmp7; /* phase 2 */
  159  tmp5 = tmp11 - tmp6;
  164  wsptr[DCTSIZE*1] = tmp1 + tmp6;
  165  wsptr[DCTSIZE*6] = tmp1 - tmp6;
  216  tmp6 = tmp12 - tmp7;
  217  tmp5 = tmp11 - tmp6;
  [all...]

/external/libvpx/libvpx/vpx_dsp/mips/ |
fwd_txfm_msa.c |
   15  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
   36  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
   37  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
   38                tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
   39  ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
  134  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;    local
  154  tmp6, tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
  156  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
  157                tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  164  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7 [all...]

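The ADD4 line pairs rows symmetrically (in4+in11, in5+in10, in6+in9, in7+in8), folding the even half of a 16-point column FDCT down to the 8-point transform that FDCT8x16_EVEN then handles. A scalar sketch of that folding step (the MSA code does this on whole vectors, four adds per macro; the loop form is a simplification):

    #include <stdint.h>

    /* Even-part folding for a 16-point FDCT: eight symmetric sums. */
    void fold_even(const int16_t in[16], int16_t tmp[8])
    {
        for (int i = 0; i < 8; i++)
            tmp[i] = (int16_t)(in[i] + in[15 - i]);
    }
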
/external/llvm/test/Analysis/ScalarEvolution/ |
min-max-exprs.ll |
   32  %tmp6 = sext i32 %N to i64
   33  %tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
   35  ; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6

pr25369.ll |
   20  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
   22  %tmp8 = add i32 %tmp7, %tmp6
   57  %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
   59  %tmp8 = add i32 %tmp7, %tmp6

/external/llvm/test/CodeGen/X86/ |
2011-06-12-FastAllocSpill.ll |
   25  %tmp6 = alloca void ()*, align 8
   34  store void ()* %tmp16, void ()** %tmp6, align 8
   35  %tmp17 = load void ()*, void ()** %tmp6, align 8

2007-11-04-LiveVariablesBug.ll |
   12  %tmp6 = call i64* asm sideeffect "foo",

2009-01-13-DoubleUpdate.ll |
   12  %tmp6.i4.i.i = shufflevector <4 x double> zeroinitializer, <4 x double> %tmp5.i3.i.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x double>> [#uses=1]
   13  %tmp14.i8.i.i = shufflevector <4 x double> %tmp6.i4.i.i, <4 x double> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x double>> [#uses=1]
   17  %tmp6.i = shufflevector <16 x double> %x, <16 x double> %tmp5.i, <16 x i32> < i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 > ; <<16 x double>> [#uses=1]
   18  %tmp14.i = shufflevector <16 x double> %tmp6.i, <16 x double> zeroinitializer, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23 > ; <<16 x double>> [#uses=1]

2010-02-23-RematImplicitSubreg.ll |
   19  %tmp6 = load i8, i8* undef, align 2 ; <i8> [#uses=3]
   20  %conv11 = sext i8 %tmp6 to i64 ; <i64> [#uses=1]
   25  %conv18 = sext i8 %tmp6 to i32 ; <i32> [#uses=1]
   30  %index.0 = phi i8 [ 0, %if.then ], [ %tmp6, %for.body ] ; <i8> [#uses=1]

vec_ins_extract.ll |
   13  %tmp6 = fadd <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
   14  store <4 x float> %tmp6, <4 x float>* %F
   27  %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
   28  store <4 x float> %tmp6, <4 x float>* %F

/external/llvm/test/CodeGen/PowerPC/ |
vec_shuffle_p8vector.ll |
   11  %tmp6 = insertelement <4 x i32> %tmp5, i32 %tmp4, i32 1
   12  %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
   35  %tmp6 = extractelement <4 x i32> %tmp2, i32 3
   39  %tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1

return-val-i128.ll |
   18  %tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
   19  %tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]

vcmp-fold.ll |
   12  %tmp6 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
   13  %tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]

/external/llvm/test/Transforms/GlobalOpt/ |
2008-01-03-Crash.ll |
   23  %tmp6.i4.i = load i32, i32* bitcast (void (i32)** @indirect1 to i32*), align 4 ; <i32> [#uses=0]

/external/llvm/test/Analysis/Delinearization/ |
undef.ll |
   21  %tmp6 = mul i64 %tmp5, undef
   22  %arrayidx69.sum = add i64 undef, %tmp6

/external/llvm/test/Analysis/LoopAccessAnalysis/ |
nullptr.ll |
   32  %tmp6 = getelementptr inbounds i32, i32* %ptr1_or_null, i64 %indvars.iv.next
   33  store i32 undef, i32* %tmp6, align 4

/external/llvm/test/CodeGen/AArch64/ |
arm64-vext.ll |
   68  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   70  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   89  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   91  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  110  %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
  112  %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  131  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
  133  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
  152  %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
  154  %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2 [all...]

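Shufflevectors whose indices run consecutively from a nonzero offset, as in the matches above, are what AArch64 lowers to EXT: an extract from the concatenation of two registers. A scalar C model (names illustrative):

    #include <stdint.h>

    /* Model of a 4-lane EXT #n on 16-bit lanes: take lanes n..n+3 of
       the 8-lane concatenation (a, b); indices <1,2,3,4> above are
       n = 1, <2,3,4,5> are n = 2, and so on. */
    void vext4_u16(uint16_t dst[4], const uint16_t a[4],
                   const uint16_t b[4], int n)
    {
        uint16_t cat[8];
        for (int i = 0; i < 4; i++) { cat[i] = a[i]; cat[i + 4] = b[i]; }
        for (int i = 0; i < 4; i++) dst[i] = cat[n + i];
    }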