    Searched full:tmp4 (Results 701 - 725 of 1043)

<< 21 22 23 24 25 26 27 28 29 30 >>

  /external/llvm/test/CodeGen/X86/
hoist-spill.ll 34 %tmp4 = zext i32 %smax52 to i64
35 %tmp5 = icmp sgt i64 undef, %tmp4
36 %smax53 = select i1 %tmp5, i64 undef, i64 %tmp4
38 %tmp7 = sub nsw i64 %tmp6, %tmp4
sink-hoist.ll 82 ; %tmp4 = and i8 %b, 127 ; <i8> [#uses=1]
83 ; %b_addr.0 = select i1 %tmp2, i8 %tmp4, i8 %tmp3 ; <i8> [#uses=1]
160 %tmp4 = load i32, i32* @cl_options_count, align 4 ; <i32> [#uses=1]
161 %tmp5 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
  /external/llvm/test/Instrumentation/AddressSanitizer/
instrumentation-with-call-threshold.ll 26 %tmp4 = load i80, i80* %d, align 8
  /external/llvm/test/Transforms/SROA/
vector-promotion.ll 29 %tmp4 = add i32 %tmp1, %tmp2
30 %tmp5 = add i32 %tmp3, %tmp4
63 %tmp4 = add i32 %tmp1, %tmp2
64 %tmp5 = add i32 %tmp3, %tmp4
101 %tmp4 = add i32 %tmp1, %tmp2
102 %tmp5 = add i32 %tmp3, %tmp4
145 %tmp4 = add i32 %tmp1, %tmp2
146 %tmp5 = add i32 %tmp3, %tmp4
192 %tmp4 = add i32 %tmp1, %tmp2
193 %tmp5 = add i32 %tmp3, %tmp4
    [all...]
  /external/swiftshader/third_party/LLVM/test/CodeGen/Thumb/
2007-05-05-InvalidPushPop.ll 26 %tmp4 = call i32 @pthread_join( i32 %tmp2, i8** %ret3 ) ; <i32> [#uses=0]
  /external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2009-08-06-branchfolder-crash.ll 51 %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
52 %conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
101 %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
102 %conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
sink-hoist.ll 82 ; %tmp4 = and i8 %b, 127 ; <i8> [#uses=1]
83 ; %b_addr.0 = select i1 %tmp2, i8 %tmp4, i8 %tmp3 ; <i8> [#uses=1]
160 %tmp4 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
161 %tmp5 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
  /external/webp/src/dsp/
dec_msa.c 366 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
381 ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
394 ST6x1_UB(tmp4, 0, tmp2, 4, ptmp, 4);
396 ST6x1_UB(tmp4, 1, tmp2, 5, ptmp, 4);
398 ST6x1_UB(tmp4, 2, tmp2, 6, ptmp, 4);
400 ST6x1_UB(tmp4, 3, tmp2, 7, ptmp, 4);
448 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
465 ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
469 ST4x8_UB(tmp4, tmp5, src, stride);
532 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
597 v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; local
    [all...]
  /external/libjpeg-turbo/simd/
jsimd_arm_neon.S 213 TMP4 .req ip
536 ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
540 add TMP4, TMP4, OUTPUT_COL
546 vst1.8 {d23}, [TMP4]
660 .unreq TMP4
719 TMP4 .req ip
    [all...]
  /external/libvpx/libvpx/vpx_dsp/mips/
intrapred8_dspr2.c 16 int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; local
23 "lb %[tmp4], 3(%[left]) \n\t"
32 "replv.qb %[tmp4], %[tmp4] \n\t"
47 "sw %[tmp4], (%[dst]) \n\t"
48 "sw %[tmp4], 4(%[dst]) \n\t"
63 [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7),
    [all...]
  /external/libvpx/libvpx/vpx_dsp/x86/
highbd_quantize_intrin_sse2.c 83 const int64_t tmp4 = ((tmp3 * quant_ptr[k != 0]) >> 16) + tmp3; local
85 (uint32_t)((tmp4 * quant_shift_ptr[k != 0]) >> 16);
  /external/llvm/test/Analysis/Delinearization/
himeno_1.ll 73 %tmp4 = add nsw i64 %k, %tmp3
74 %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
himeno_2.ll 73 %tmp4 = add nsw i64 %k, %tmp3
74 %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
  /external/llvm/test/Analysis/ScalarEvolution/
SolveQuadraticEquation.ll 21 %tmp4 = mul i32 %i.0, 2
22 %tmp5 = sub i32 %SQ, %tmp4
  /external/llvm/test/CodeGen/AArch64/
arm64-prefetch.ll 47 %tmp4 = load i32*, i32** @a, align 8, !tbaa !3
48 %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom
  /external/llvm/test/CodeGen/AMDGPU/
ds-negative-offset-addressing-mode-loop.ll 50 %tmp4 = load float, float addrspace(3)* %arrayidx8, align 4
54 %add12 = fadd float %add11, %tmp4
llvm.AMDGPU.cube.ll 14 %tmp4 = fdiv float %tmp3, %tmp1
21 %tmp11 = insertelement <4 x float> undef, float %tmp4, i32 0
rv7x0_count3.ll 10 %tmp4 = insertelement <4 x float> undef, float %tmp, i32 0
11 %tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 1
  /external/llvm/test/CodeGen/ARM/
2010-05-20-NEONSpillCrash.ll 42 %tmp4 = sub <8 x i8> %tmp4efgh, %tmp4abcd ; <<8 x i8>> [#uses=1]
44 ret <8 x i8> %tmp4
2012-01-24-RegSequenceLiveRange.ll 13 %tmp4 = insertelement <4 x float> %tmp3, float 0.000000e+00, i32 1
14 %tmp5 = insertelement <4 x float> %tmp4, float 0.000000e+00, i32 2
intrinsics-crypto.ll 8 %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
10 %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %tmp4)
vqdmul.ll 206 %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
207 %tmp5 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
217 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
218 %tmp5 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
251 %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
252 %tmp5 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
262 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
263 %tmp5 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
  /external/llvm/test/CodeGen/Mips/
alloca.ll 14 %tmp4 = alloca i8, i32 %size, align 4
17 %call7 = call i32 @foo(i8* %tmp4) nounwind
  /external/llvm/test/CodeGen/Thumb/
dyn-stackalloc.ll 58 %tmp4 = add i32 %tmp1, 2
59 %tmp5 = add i32 %tmp4, %tmp3
  /external/llvm/test/CodeGen/Thumb2/
2010-02-11-phi-cycle.ll 20 %tmp4 = zext i32 %1 to i64 ; <i64> [#uses=1]
22 %ins = or i64 %tmp4, %mask ; <i64> [#uses=2]

Completed in 1211 milliseconds

<< 21 22 23 24 25 26 27 28 29 30 >>