    Searched full:tmp1 (Results 326 - 350 of 1307)


  /external/llvm/test/CodeGen/X86/
tbm-intrinsics-x86_64.ll 19 %tmp1 = load i32, i32* %a, align 4
20 %0 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %tmp1, i32 2814)
40 %tmp1 = load i64, i64* %a, align 8
41 %0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %tmp1, i64 2814)
2009-04-12-FastIselOverflowCrash.ll 11 %tmp1 = call %0 @llvm.sadd.with.overflow.i32(i32 1, i32 0)
12 %tmp2 = extractvalue %0 %tmp1, 1
16 %tmp4 = extractvalue %0 %tmp1, 0
loop-strength-reduce5.ll 13 %tmp1 = trunc i32 %i.014.0 to i16 ; <i16> [#uses=2]
14 store volatile i16 %tmp1, i16* @X, align 2
15 %tmp34 = shl i16 %tmp1, 2 ; <i16> [#uses=1]
  /external/llvm/test/Transforms/ArgumentPromotion/
byval.ll 11 %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
12 %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
21 %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
22 store i32 1, i32* %tmp1, align 8
  /external/llvm/test/Transforms/GVN/
bitcast-of-call.ll 7 %tmp1 = bitcast i8* %tmp to i32* ; <i32*> [#uses=0]
  /external/llvm/test/Transforms/IndVarSimplify/
loop_evaluate_5.ll 5 ; Indvars should be able to compute an exit value for %tmp1.
15 %result2 = phi i32 [ %tmp1, %bb2 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
18 %tmp1 = add i32 %tmp2, %result2 ; <i32> [#uses=2]
30 %result.lcssa = phi i32 [ %tmp1, %bb2.bb3_crit_edge ], [ 0, %entry ] ; <i32> [#uses=1]
  /external/v8/test/mjsunit/
delete-vars-from-eval.js 37 eval("var tmp1 = 1");
38 assertEquals(1, tmp1);
39 assertTrue(delete tmp1);
40 assertTrue(typeof(tmp1) == 'undefined');
  /external/llvm/test/Analysis/BasicAA/
2008-04-15-Byval.ll 8 ; CHECK: store i32 2, i32* %tmp1
11 %tmp1 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
12 store i32 2, i32* %tmp1, align 4
  /external/llvm/test/Analysis/ScalarEvolution/
zext-wrap.ll 9 %l_95.0.i1 = phi i8 [ %tmp1, %bb.i ], [ 0, %entry ]
17 %tmp1 = add i8 %l_95.0.i1, -1
18 %phitmp = icmp eq i8 %tmp1, 1
  /external/llvm/test/CodeGen/AArch64/
arm64-dup.ll 6 %tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
7 %tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
20 %tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
21 %tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
30 %tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
31 %tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
38 %tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
39 %tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
46 %tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
47 %tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32
    [all...]
arm64-ldp.ll 8 %tmp1 = load i32, i32* %add.ptr, align 4
9 %add = add nsw i32 %tmp1, %tmp
18 %tmp1 = load i32, i32* %add.ptr, align 4
20 %sexttmp1 = sext i32 %tmp1 to i64
31 %tmp1 = load i32, i32* %add.ptr, align 4
33 %sexttmp1 = zext i32 %tmp1 to i64
44 %tmp1 = load i32, i32* %add.ptr, align 4
46 %sexttmp1 = sext i32 %tmp1 to i64
57 %tmp1 = load i64, i64* %add.ptr, align 8
58 %add = add nsw i64 %tmp1, %tm
    [all...]
arm64-neon-add-pairwise.ll 8 %tmp1 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
10 ret <8 x i8> %tmp1
17 %tmp1 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
19 ret <16 x i8> %tmp1
26 %tmp1 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
28 ret <4 x i16> %tmp1
35 %tmp1 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
37 ret <8 x i16> %tmp1
44 %tmp1 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
46 ret <2 x i32> %tmp1
    [all...]
arm64-vsub.ll 6 %tmp1 = load <8 x i16>, <8 x i16>* %A
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
15 %tmp1 = load <4 x i32>, <4 x i32>* %A
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
24 %tmp1 = load <2 x i64>, <2 x i64>* %A
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
67 %tmp1 = load <8 x i16>, <8 x i16>* %A
69 %tmp3 = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
76 %tmp1 = load <4 x i32>, <4 x i32>* %A
78 %tmp3 = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2
    [all...]
  /external/llvm/test/CodeGen/ARM/
ldr_pre.ll 18 %tmp1 = sub i32 %a, %b ; <i32> [#uses=2]
19 %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
21 %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1]
pack.ll 6 %tmp1 = and i32 %X, 65535
8 %tmp5 = or i32 %tmp4, %tmp1
15 %tmp1 = and i32 %X, 65535
18 %tmp57 = or i32 %tmp4, %tmp1
34 %tmp1 = and i32 %X, 65535
36 %tmp46 = or i32 %tmp3, %tmp1
63 %tmp1 = and i32 %X, -65536
67 %tmp59 = or i32 %tmp4, %tmp1
74 %tmp1 = and i32 %X, -65536
77 %tmp57 = or i32 %tmp4, %tmp1
    [all...]
vdup.ll 7 %tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
8 %tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
21 %tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
22 %tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
31 %tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
32 %tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
39 %tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
40 %tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
47 %tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
48 %tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32
    [all...]
vuzp.ll 12 %tmp1 = load <8 x i8>, <8 x i8>* %A
14 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
15 %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
29 %tmp1 = load <8 x i8>, <8 x i8>* %A
31 %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
44 %tmp1 = load <4 x i16>, <4 x i16>* %A
46 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
47 %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
61 %tmp1 = load <4 x i16>, <4 x i16>* %A
63 %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7
    [all...]
fusedMAC.ll 106 %tmp1 = tail call float @llvm.fma.f32(float %a, float %b, float %c) nounwind readnone
107 ret float %tmp1
114 %tmp1 = tail call double @llvm.fma.f64(double %a, double %b, double %c) nounwind readnone
115 ret double %tmp1
122 %tmp1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
123 ret <2 x float> %tmp1
130 %tmp1 = fsub double -0.0, %a
131 %tmp2 = tail call double @llvm.fma.f64(double %tmp1, double %b, double %c) nounwind readnone
139 %tmp1 = fsub double -0.0, %b
140 %tmp2 = tail call double @llvm.fma.f64(double %a, double %tmp1, double %c) nounwind readnon
    [all...]
  /external/sonivox/arm-wt-22k/lib_src/
ARM-E_voice_gain_gnu.s 46 tmp1 .req r5 label
86 LDR tmp1, [pWTFrame, #m_gainTarget]
94 SMULBB gainIncLeft, tmp1, gainIncLeft
102 SMULBB gainIncRight, tmp1, gainIncRight
110 LDR tmp1, [pMixBuffer]
114 SMLAWB tmp1, gainLeft, tmp0, tmp1
120 STR tmp1, [pMixBuffer], #4
148 LDR tmp1, [pMixBuffer] @ get left channel output sample
153 ADD tmp1, tmp0, tmp1
    [all...]
  /external/pcre/dist/
pcre_jit_compile.c 474 #define TMP1 SLJIT_R0
    [all...]
  /system/core/libutils/
LinearTransform.cpp 48 uint64_t tmp1, tmp2; local
65 // tmp1 = L32(val) * N (a 64 bit int)
67 // M = val * N = (tmp2 << 32) + tmp1
69 tmp1 = (val & UINT32_MAX) * N;
72 // tmp2 = tmp2 + U32(tmp1)
75 tmp2 += tmp1 >> 32;
91 // tmp1 = L32(val) * N
97 // compute tmp1 = (r | M[0, 31])
98 tmp1 = (tmp1 & UINT32_MAX) | ((uint64_t)r << 32)
    [all...]
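The LinearTransform.cpp hits above sketch a 64-bit x 32-bit widening multiply done in 32-bit halves: tmp1 holds the low-half product and tmp2 the high-half product plus carry. A minimal standalone illustration of that splitting technique, using hypothetical names rather than the libutils API, might look like:

#include <cstdint>
#include <cstdio>

// Sketch of the split multiply suggested by the LinearTransform.cpp comments
// above: form the 96-bit product of a 64-bit value and a 32-bit scale by
// multiplying the 32-bit halves separately. Names are illustrative only.
static void mul_u64_u32(uint64_t val, uint32_t N, uint64_t *lo64, uint32_t *hi32) {
    uint64_t tmp1 = (val & UINT32_MAX) * N;      // tmp1 = L32(val) * N (a 64-bit int)
    uint64_t tmp2 = (val >> 32) * N;             // H32(val) * N, contributes shifted left by 32
    tmp2 += tmp1 >> 32;                          // carry the high half of tmp1 into tmp2
    *lo64 = (tmp2 << 32) | (tmp1 & UINT32_MAX);  // low 64 bits of val * N
    *hi32 = (uint32_t)(tmp2 >> 32);              // bits 64..95 of val * N
}

int main() {
    uint64_t lo; uint32_t hi;
    mul_u64_u32(0x1234567890ABCDEFull, 48000u, &lo, &hi);
    printf("hi=%u lo=%llu\n", hi, (unsigned long long)lo);
    return 0;
}

The real file continues past the truncated hit (it mentions a remainder r), so only the split-multiply step is sketched here.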
  /external/llvm/test/CodeGen/AMDGPU/
llvm.AMDGPU.kill.ll 28 %tmp1 = select i1 %tmp0, float 1.0, float 0.0
29 call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 1, i32 1, float %tmp1, float %tmp1, float %tmp1, float %tmp1)
  /external/libmpeg2/common/x86/
impeg2_inter_pred_sse42_intr.c 480 __m128i tmp0, tmp1; local
499 tmp1 = _mm_add_epi16(src_r1, src_r1_1); //Row 1 horizontal interpolation
500 tmp0 = _mm_add_epi16(tmp0, tmp1); //Row 0 vertical interpolation
519 tmp1 = _mm_add_epi16(tmp0, tmp1); //Row 1 vertical interpolation
520 tmp1 = _mm_add_epi16(tmp1, value_2);
521 tmp1 = _mm_srli_epi16(tmp1, 2);
522 tmp1 = _mm_packus_epi16(tmp1, value_2)
    [all...]
  /art/runtime/arch/arm64/
memcmp16_arm64.S 41 #define tmp1 x8 define
53 eor tmp1, src1, src2
54 tst tmp1, #7
56 ands tmp1, src1, #7
111 add limit, limit, tmp1 /* Adjust the limit for the extra. */
112 lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
114 neg tmp1, tmp1 /* Bits to alignment -64. */
118 lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). *
    [all...]
  /external/libvpx/libvpx/vpx_dsp/x86/
highbd_quantize_intrin_sse2.c 69 __m128i coeffs, coeffs_sign, tmp1, tmp2; local
78 tmp1 = _mm_cmpgt_epi32(coeffs, zbins[i != 0]);
80 tmp1 = _mm_or_si128(tmp1, tmp2);
81 test = _mm_movemask_epi8(tmp1);
88 const int64_t tmp1 = abs_coeff[j] + round_ptr[k != 0]; local
89 const int64_t tmp2 = ((tmp1 * quant_ptr[k != 0]) >> 16) + tmp1;
166 const int64_t tmp1 = abs_coeff local
168 const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1
    [all...]
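The highbd_quantize hits above apply a Q16 fixed-point quantization factor to a rounded coefficient magnitude. A hedged, self-contained sketch of just those two lines, with illustrative names rather than the libvpx API and the clamping/scan bookkeeping omitted:

#include <cstdint>
#include <cstdlib>
#include <cstdio>

// Fixed-point quantizer step as seen in the highbd_quantize_intrin_sse2.c
// hits above; names are illustrative, not the libvpx API.
static int64_t quantize_step(int32_t coeff, int32_t round_val, int32_t quant_val) {
    const int64_t tmp1 = llabs(coeff) + round_val;           // bias the coefficient magnitude
    const int64_t tmp2 = ((tmp1 * quant_val) >> 16) + tmp1;  // Q16 multiply, then add the biased value back
    return tmp2;
}

int main() {
    printf("%lld\n", (long long)quantize_step(-1234, 8, 20000));
    return 0;
}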
