/external/libvpx/libvpx/vpx_dsp/mips/
vpx_convolve8_avg_vert_msa.c
    567  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    604  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    605  SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    606  PCKEV_AVG_ST_UB(tmp7, tmp6, dst3, dst + 16 + dst_stride);
    624  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    625  SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    626  PCKEV_AVG_ST_UB(tmp7, tmp6, dst7, dst + 48 + dst_stride);

vpx_convolve8_vert_msa.c
    564  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    597  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    598  SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    599  PCKEV_ST_SB(tmp6, tmp7, dst + 16 + dst_stride);
    617  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    618  SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    619  PCKEV_ST_SB(tmp6, tmp7, dst + 48 + dst_stride);

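The three MSA macros recurring in both convolve files implement one step of the 2-tap (bilinear) vertical filter: DOTP_UB2_UH forms unsigned-byte dot products against the filter taps, SRARI_H2_UH applies the rounding right shift by FILTER_BITS, and PCKEV_AVG_ST_UB (PCKEV_ST_SB in the non-averaging file) packs the halfwords back to bytes, in the _avg_ variant averaging with the destination before the store. A scalar sketch of the per-pixel arithmetic; the function and parameter names are illustrative, not from the source, and the taps are assumed to sum to 1 << FILTER_BITS as vpx_dsp filters do:

    #include <stdint.h>

    #define FILTER_BITS 7  /* as in vpx_dsp/vpx_filter.h */

    static uint8_t convolve2_avg_pixel(uint8_t s0, uint8_t s1,
                                       uint8_t f0, uint8_t f1,
                                       uint8_t dst) {
      /* DOTP_UB2_UH: unsigned-byte dot product widened to 16+ bits */
      uint32_t acc = (uint32_t)s0 * f0 + (uint32_t)s1 * f1;
      /* SRARI_H2_UH: shift right with rounding */
      uint8_t res = (uint8_t)((acc + (1u << (FILTER_BITS - 1))) >> FILTER_BITS);
      /* PCKEV_AVG_ST_UB: pack, then average with the destination pixel */
      return (uint8_t)((res + dst + 1) >> 1);
    }
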
/external/llvm/test/CodeGen/ARM/
vrev.ll
    144  %tmp6 = bitcast double %tmp5 to <2 x float>
    145  %tmp7 = fadd <2 x float> %tmp6, %tmp6

2007-05-07-tailmerge-1.ll
    33  %tmp6 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

2007-05-09-tailmerge-2.ll
    39  %tmp6 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

ldstrex.ll
    27  %tmp6 = lshr i64 %val, 32
    28  %tmp7 = trunc i64 %tmp6 to i32

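The lshr/trunc pair matched here is the usual IR shape for splitting a wide value into register-sized halves; ldstrex.ll feeds the two i32 halves of an i64 to ARM's strexd, and arm64-ldxr-stxr.ll further down does the same for an i128 and stxp. The equivalent C, with illustrative names:

    #include <stdint.h>

    static void split_u64(uint64_t val, uint32_t *lo, uint32_t *hi) {
      *lo = (uint32_t)val;          /* trunc i64 %val to i32 */
      *hi = (uint32_t)(val >> 32);  /* %tmp6 = lshr i64 %val, 32 ; then trunc */
    }
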
spill-q.ll
    75  %tmp6 = fadd <4 x float> %tmp5, %ld8
    76  %tmp7 = fadd <4 x float> %tmp6, %ld9

vfp.ll
    121  %tmp6 = or i1 %tmp.upgrd.1, %tmp5 ; <i1> [#uses=1]
    124  br i1 %tmp6, label %cond_true, label %cond_false

/external/llvm/test/CodeGen/X86/
regalloc-reconcile-broken-hints.ll
    68  %tmp6 = sext i32 %tmp5 to i64
    72  %indvars.iv.i = phi i64 [ %tmp6, %while.body.lr.ph.i ], [ %indvars.iv.next.i, %while.body.i ]
    79  %indvars.iv.i64 = phi i64 [ %indvars.iv.next.i65, %land.rhs.i ], [ 0, %for.body3 ], [ %tmp6, %while.body.i ]

remat-fold-load.ll
    54  %tmp6 = load i32, i32* %second3.i.i76, align 4
    55  %tmp7 = zext i32 %tmp6 to i128
    84  %cmp.i99 = icmp sgt i32 %tmp6, %tmp15

pr1505b.ll
    36  %tmp6 = load volatile float, float* @a ; <float> [#uses=1]
    39  %tmp9 = tail call float @tanf( float %tmp6 ) ; <float> [#uses=1]

scalar_widen_div.ll
    27  %tmp6 = load <2 x i32> addrspace(1)*, <2 x i32> addrspace(1)** %dsource.addr
    29  %arrayidx8 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp6, i32 %tmp7

vselect-avx.ll
    76  %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
    77  %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer

widen_shuffle-1.ll
    57  %tmp6.i14 = shufflevector <3 x float> %tmp3.i13, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    58  %tmp97.i = shufflevector <4 x float> %tmp6.i14, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>

/external/libvpx/libvpx/vpx_dsp/x86/
inv_txfm_sse2.c
    334   tmp6 = _mm_madd_epi16(lo_1, cst3); \
    343   tmp6 = _mm_add_epi32(tmp6, rounding); \
    352   tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
    358   res3 = _mm_packs_epi32(tmp6, tmp7); \
    471   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    560   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    817   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    835   tmp6 = _mm_madd_epi16(lo_35, stg1_3)
    1214  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    2189  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    3086  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    3262  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    [all...]

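The macro lines matched in inv_txfm_sse2.c (and again in vp9_idct_intrin_sse2.c below) are one stage of the fixed-point butterfly used throughout the SSE2 inverse transforms: _mm_madd_epi16 forms pairwise 16x16-to-32-bit products against a cosine-constant vector, a rounding bias is added, _mm_srai_epi32 shifts by DCT_CONST_BITS, and _mm_packs_epi32 saturates the two 32-bit vectors back into one 16-bit result. A self-contained sketch of that stage, assuming DCT_CONST_BITS is 14 as in libvpx's txfm_common.h; the names lo_1, hi_1, and cst3 stand in for the interleaved inputs and constant vector:

    #include <emmintrin.h>

    #define DCT_CONST_BITS 14

    static __m128i idct_butterfly_half(__m128i lo_1, __m128i hi_1,
                                       __m128i cst3) {
      const __m128i rounding = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
      __m128i tmp6 = _mm_madd_epi16(lo_1, cst3); /* pairwise dot products */
      __m128i tmp7 = _mm_madd_epi16(hi_1, cst3);
      tmp6 = _mm_add_epi32(tmp6, rounding);      /* bias for round-to-nearest */
      tmp7 = _mm_add_epi32(tmp7, rounding);
      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
      return _mm_packs_epi32(tmp6, tmp7);        /* saturating pack to i16 */
    }
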
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_idct_intrin_sse2.c
    410   tmp6 = _mm_madd_epi16(lo_1, cst3); \
    419   tmp6 = _mm_add_epi32(tmp6, rounding); \
    428   tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
    434   res3 = _mm_packs_epi32(tmp6, tmp7); \
    556   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    691   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    1015  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    1412  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    2558  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    3515  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    3800  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  (local)
    [all...]

/external/llvm/test/CodeGen/AArch64/
arm64-ldxr-stxr.ll
    24   %tmp6 = lshr i128 %val, 64
    25   %tmp7 = trunc i128 %tmp6 to i64
    163  %tmp6 = lshr i128 %val, 64
    164  %tmp7 = trunc i128 %tmp6 to i64

/external/llvm/test/CodeGen/PowerPC/
2007-05-22-tailmerge-3.ll
    35  %tmp6 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]

/external/llvm/test/Transforms/Inline/
inline_minisize.ll
    29   %tmp6 = load i32, i32* %i, align 4
    31   %cmp = icmp slt i32 %tmp6, %tmp7
    129  %tmp6 = load i32, i32* %i, align 4
    131  %cmp = icmp slt i32 %tmp6, %tmp7

/external/llvm/test/Transforms/InstCombine/
xor.ll
    261  ; CHECK-NEXT: %tmp6 = zext i1 %tmp to i32
    262  ; CHECK-NEXT: ret i32 %tmp6
    266  %tmp6 = zext i1 %tmp to i32 ; <i32> [#uses=1]
    267  ret i32 %tmp6

simplify-libcalls.ll
    53  %tmp6 = icmp eq i32 %tmp5, 0 ; <i1> [#uses=1]
    54  ret i1 %tmp6

/external/llvm/test/CodeGen/AMDGPU/
split-vector-memoperand-offsets.ll
    40  %tmp6 = mul i32 %tmp2, %tmp
    41  %tmp10 = add i32 %tmp3, %tmp6

/external/llvm/test/CodeGen/Thumb2/
thumb2-spill-q.ll
    75  %tmp6 = fadd <4 x float> %tmp5, %ld8
    76  %tmp7 = fadd <4 x float> %tmp6, %ld9

/external/llvm/test/MC/ARM/
ldr-pseudo.s
    80   @ CHECK: ldr r0, .Ltmp[[TMP6:[0-9]+]]
    181  @ CHECK: .Ltmp[[TMP6]]

/external/llvm/test/Transforms/LoopRotate/
PhiRename-1.ll
    48  %tmp6 = load %struct.operator*, %struct.operator** %op ; <%struct.operator*> [#uses=1]
    49  %tmp7 = getelementptr %struct.operator, %struct.operator* %tmp6, i32 0, i32 5 ; <i32*> [#uses=1]