    Searched full:tmp4 (Results 426 - 450 of 1043)


  /external/swiftshader/third_party/LLVM/test/Transforms/LoopUnroll/
shifted-tripcount.ll 19 %tmp4 = load double* %arrayidx ; <double> [#uses=1]
21 %mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/Transforms/ObjCARC/
contract-storestrong-ivar.ll 25 %tmp4 = tail call i8* @objc_retain(i8* %tmp3) nounwind
28 %tmp6 = bitcast i8* %tmp4 to %1*
  /external/swiftshader/third_party/LLVM/test/Transforms/ScalarRepl/
load-store-aggregate.ll 17 %tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
18 %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
memset-aggregate.ll 18 %tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
19 %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
29 %tmp4 = getelementptr [4 x %struct.foo]* %L, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
30 %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
42 %tmp4 = getelementptr %struct.bar* %B, i32 0, i32 2 ; <double*> [#uses=1]
43 store double 1.000000e+01, double* %tmp4
  /external/llvm/test/CodeGen/X86/
2007-10-31-extractelement-i64.ll 17 %tmp4 = load <1 x i64>, <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
36 %tmp4 = load <1 x i64>, <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
55 %tmp4 = load i64, i64* %tmp, align 8 ; <i64> [#uses=1]
56 store i64 %tmp4, i64* %retval, align 8
75 %tmp4 = load i64, i64* %tmp, align 8 ; <i64> [#uses=1]
76 store i64 %tmp4, i64* %retval, align 8
vararg_tailcall.ll 57 %tmp4 = load i8*, i8** @sel5, align 8
59 %call = tail call i8* (i8*, i8*, i8*, ...) @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5) nounwind optsize noredzone
73 %tmp4 = load i8*, i8** @sel5, align 8
76 %call = tail call i8* (i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...) @x7(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i8* %tmp6) nounwind optsize noredzone
90 %tmp4 = load i8*, i8** @sel5, align 8
92 %call = tail call i8* (i8*, i8*, i8*, ...) @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i32 48879, i32 48879) nounwind optsize noredzone
2008-02-18-TailMergingBug.ll 10 %tmp4 = getelementptr float, float* %result, i32 2 ; <float*> [#uses=5]
11 %tmp5 = load float, float* %tmp4, align 4 ; <float> [#uses=10]
48 %tmp4.mux787 = select i1 %tmp82475, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
54 %tmp4.mux = select i1 %tmp82, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
61 %iftmp.0.0.in = phi float* [ %tmp10, %bb103 ], [ %result, %bb26 ], [ %result, %bb40 ], [ %result, %bb50 ], [ %tmp4.mux, %bb80 ], [ %tmp4.mux787, %bb72 ] ; <float*> [#uses=1]
95 %tmp4.mux791 = select i1 %tmp197483, float* %tmp4, float* %tmp7 ; <float*> [#uses=1
    [all...]
  /external/llvm/test/Transforms/IndVarSimplify/
ada-loops.ll 30 %tmp4 = getelementptr [256 x i32], [256 x i32]* %a, i32 0, i32 %tmp3 ; <i32*> [#uses=1]
31 store i32 0, i32* %tmp4
65 %tmp4 = add i32 %tmp12, 10 ; <i32> [#uses=1]
66 %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
83 %tmp4 = add i32 %tmp12, -10 ; <i32> [#uses=1]
84 %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
  /external/llvm/test/Transforms/Inline/
devirtualize-3.ll 42 %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
43 store void (i8*, i32)* bitcast (void (%struct.foo_sf_t*, i32)* @foo2 to void (i8*, i32)*), void (i8*, i32)** %tmp4
56 %tmp4 = load void (i8*, i32)*, void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
63 call void %tmp4(i8* %conv, i32 %mul)
71 %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
72 %tmp5 = load i8*, i8** %tmp4 ; <i8*> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2007-10-31-extractelement-i64.ll 17 %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
36 %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
55 %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
56 store i64 %tmp4, i64* %retval, align 8
75 %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
76 store i64 %tmp4, i64* %retval, align 8
vararg_tailcall.ll 57 %tmp4 = load i8** @sel5, align 8, !tbaa !0
59 %call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5) nounwind optsize noredzone
73 %tmp4 = load i8** @sel5, align 8, !tbaa !0
76 %call = tail call i8* (i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...)* @x7(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i8* %tmp6) nounwind optsize noredzone
90 %tmp4 = load i8** @sel5, align 8, !tbaa !0
92 %call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i32 48879, i32 48879) nounwind optsize noredzone
2008-02-18-TailMergingBug.ll 9 %tmp4 = getelementptr float* %result, i32 2 ; <float*> [#uses=5]
10 %tmp5 = load float* %tmp4, align 4 ; <float> [#uses=10]
47 %tmp4.mux787 = select i1 %tmp82475, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
53 %tmp4.mux = select i1 %tmp82, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
60 %iftmp.0.0.in = phi float* [ %tmp10, %bb103 ], [ %result, %bb26 ], [ %result, %bb40 ], [ %result, %bb50 ], [ %tmp4.mux, %bb80 ], [ %tmp4.mux787, %bb72 ] ; <float*> [#uses=1]
94 %tmp4.mux791 = select i1 %tmp197483, float* %tmp4, float* %tmp7 ; <float*> [#uses=1
    [all...]
  /external/swiftshader/third_party/LLVM/test/Transforms/IndVarSimplify/
ada-loops.ll 31 %tmp4 = getelementptr [256 x i32]* %a, i32 0, i32 %tmp3 ; <i32*> [#uses=1]
32 store i32 0, i32* %tmp4
66 %tmp4 = add i32 %tmp12, 10 ; <i32> [#uses=1]
67 %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
84 %tmp4 = add i32 %tmp12, -10 ; <i32> [#uses=1]
85 %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/Transforms/Inline/
devirtualize-3.ll 42 %tmp4 = getelementptr inbounds %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
43 store void (i8*, i32)* bitcast (void (%struct.foo_sf_t*, i32)* @foo2 to void (i8*, i32)*), void (i8*, i32)** %tmp4
56 %tmp4 = load void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
63 call void %tmp4(i8* %conv, i32 %mul)
71 %tmp4 = getelementptr inbounds %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
72 %tmp5 = load i8** %tmp4 ; <i8*> [#uses=1]
  /external/libjpeg-turbo/
jfdctint.c 145 JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
165 tmp4 = dataptr[3] - dataptr[4];
187 * i0..i3 in the paper are tmp4..tmp7 here.
190 z1 = tmp4 + tmp7;
192 z3 = tmp4 + tmp6;
196 tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
208 dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
230 tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
252 * i0..i3 in the paper are tmp4..tmp7 here
    [all...]
jidctflt.c 76 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
146 tmp4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1] * _0_125);
153 z11 = tmp4 + tmp7;
154 z12 = tmp4 - tmp7;
165 tmp4 = tmp10 - tmp5;
173 wsptr[DCTSIZE*3] = tmp3 + tmp4;
174 wsptr[DCTSIZE*4] = tmp3 - tmp4;
223 tmp4 = tmp10 - tmp5;
233 outptr[3] = range_limit[((int) (tmp3 + tmp4)) & RANGE_MASK];
234 outptr[4] = range_limit[((int) (tmp3 - tmp4)) & RANGE_MASK]
    [all...]
jfdctflt.c 62 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
79 tmp4 = dataptr[3] - dataptr[4];
97 tmp10 = tmp4 + tmp5; /* phase 2 */
129 tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
147 tmp10 = tmp4 + tmp5; /* phase 2 */
  /frameworks/av/media/libstagefright/codecs/mp3dec/src/
pvmp3_dct_16.cpp 159 int32 tmp4; local
210 tmp4 = vec[ 4] + vec[11];
212 tmp1 = (tmp3 + tmp4);
213 tmp4 = fxp_mul32_Q32((tmp3 - tmp4) << 2, Qfmt_31(0.64072886193538F));
231 tmp1 = fxp_mul32_Q32((itmp_e0 - tmp4) << 1, Qfmt_31(0.54119610014620F));
232 tmp7 = itmp_e0 + tmp4;
237 tmp4 = fxp_mul32_Q32((tmp7 - tmp6) << 1, Qfmt_31(0.70710678118655F));
243 vec[ 6] = tmp1 + tmp4;
244 vec[10] = tmp7 + tmp4;
    [all...]
  /external/syslinux/com32/lib/jpeg/
jidctflt.c 125 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; local
192 tmp4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
199 z11 = tmp4 + tmp7;
200 z12 = tmp4 - tmp7;
211 tmp4 = tmp10 + tmp5;
219 wsptr[DCTSIZE*4] = tmp3 + tmp4;
220 wsptr[DCTSIZE*3] = tmp3 - tmp4;
268 tmp4 = tmp10 + tmp5;
278 outptr[4] = descale_and_clamp((int)(tmp3 + tmp4), 3);
279 outptr[3] = descale_and_clamp((int)(tmp3 - tmp4), 3)
    [all...]
  /external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vmul.ll 156 %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
157 %tmp5 = mul <8 x i16> %tmp3, %tmp4
176 %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
177 %tmp5 = mul <4 x i32> %tmp3, %tmp4
196 %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
197 %tmp5 = mul <2 x i64> %tmp3, %tmp4
216 %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
217 %tmp5 = mul <8 x i16> %tmp3, %tmp4
236 %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
237 %tmp5 = mul <4 x i32> %tmp3, %tmp4
    [all...]
  /external/llvm/test/CodeGen/AArch64/
arm64-neon-copy.ll 57 %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
58 ret <16 x i8> %tmp4
65 %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
66 ret <8 x i16> %tmp4
73 %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
74 ret <4 x i32> %tmp4
81 %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
82 ret <2 x i64> %tmp4
89 %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
90 ret <4 x float> %tmp4
    [all...]
  /external/clang/test/CodeGen/
aarch64-neon-ldst-one.c 97 // CHECK: [[TMP4:%.*]] = bitcast <8 x i16> [[LANE]] to <8 x half>
98 // CHECK: ret <8 x half> [[TMP4]]
246 // CHECK: [[TMP4:%.*]] = bitcast <4 x i16> [[LANE]] to <4 x half>
247 // CHECK: ret <4 x half> [[TMP4]]
315 // CHECK: [[TMP4:%.*]] = load %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[RETVAL]], align 16
316 // CHECK: ret %struct.uint8x16x2_t [[TMP4]]
330 // CHECK: [[TMP4:%.*]] = bitcast %struct.uint16x8x2_t* [[RETVAL]] to i8*
332 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 16, i1 false)
348 // CHECK: [[TMP4:%.*]] = bitcast %struct.uint32x4x2_t* [[RETVAL]] to i8*
350 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 16, i1 false
    [all...]
  /external/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/
different-addrspace-addressing-mode-loops.ll 32 %tmp4 = sext i8 %tmp3 to i32
35 %tmp7 = add nsw i32 %tmp6, %tmp4
70 %tmp4 = sext i8 %tmp3 to i32
73 %tmp7 = add nsw i32 %tmp6, %tmp4
85 ; OPT: %tmp4 = load i8, i8 addrspace(3)* %scevgep4, align 1
105 %tmp4 = load i8, i8 addrspace(3)* %tmp3, align 1
106 %tmp5 = sext i8 %tmp4 to i32
144 %tmp4 = load i8, i8 addrspace(3)* %tmp3, align 1
145 %tmp5 = sext i8 %tmp4 to i32
  /external/libjpeg-turbo/simd/
jfdctint-mmx.asm 191 psubw mm2,mm1 ; mm2=data3-data4=tmp4
253 movq mm0,mm2 ; mm2=tmp4
282 ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
283 ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
286 ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
290 ; tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223;
293 ; tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223);
294 ; data7 = tmp4 + z3; data5 = tmp5 + z4
    [all...]
jfdctint-sse2-64.asm 201 psubw xmm2,xmm4 ; xmm2=data3-data4=tmp4
263 movdqa xmm6,xmm2 ; xmm2=tmp4
292 ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
293 ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
296 ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
300 ; tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223;
303 ; tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223);
304 ; data7 = tmp4 + z3; data5 = tmp5 + z4
    [all...]
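
Note on the libjpeg-turbo hits above: the jfdctint comments spell out the odd-part stage of the scaled-integer forward DCT, where tmp4..tmp7 are the i0..i3 inputs of the paper. A minimal C sketch of that stage, under stated assumptions, is below; FIX() and DESCALE() are simplified stand-ins for libjpeg-turbo's fixed-point macros, and the constants are the sqrt(2)*cos(k*pi/16) combinations named in those comments.

    #include <stdint.h>

    /* Simplified stand-ins for libjpeg-turbo's fixed-point helpers (assumption). */
    #define CONST_BITS   13
    #define FIX(x)       ((int32_t)((x) * (1 << CONST_BITS) + 0.5))
    #define DESCALE(x,n) (((x) + ((int32_t)1 << ((n) - 1))) >> (n))

    /* Odd part of the scaled-integer DCT: tmp4..tmp7 are i0..i3 in the paper;
     * the four outputs correspond to data7, data5, data3 and data1. */
    static void dct_odd_part(int32_t tmp4, int32_t tmp5, int32_t tmp6,
                             int32_t tmp7, int32_t out[4])
    {
        int32_t z1 = tmp4 + tmp7;
        int32_t z2 = tmp5 + tmp6;
        int32_t z3 = tmp4 + tmp6;
        int32_t z4 = tmp5 + tmp7;
        int32_t z5 = (z3 + z4) * FIX(1.175875602);   /* sqrt(2) *  c3            */

        tmp4 *=  FIX(0.298631336);                   /* sqrt(2) * (-c1+c3+c5-c7) */
        tmp5 *=  FIX(2.053119869);                   /* sqrt(2) * ( c1+c3-c5+c7) */
        tmp6 *=  FIX(3.072711026);                   /* sqrt(2) * ( c1+c3+c5-c7) */
        tmp7 *=  FIX(1.501321110);                   /* sqrt(2) * ( c1+c3-c5-c7) */
        z1   *= -FIX(0.899976223);                   /* sqrt(2) * ( c7-c3)       */
        z2   *= -FIX(2.562915447);                   /* sqrt(2) * (-c1-c3)       */
        z3    = z3 * -FIX(1.961570560) + z5;         /* sqrt(2) * (-c3-c5)       */
        z4    = z4 * -FIX(0.390180644) + z5;         /* sqrt(2) * ( c5-c3)       */

        out[0] = DESCALE(tmp4 + z1 + z3, CONST_BITS);  /* data7 */
        out[1] = DESCALE(tmp5 + z2 + z4, CONST_BITS);  /* data5 */
        out[2] = DESCALE(tmp6 + z2 + z3, CONST_BITS);  /* data3 */
        out[3] = DESCALE(tmp7 + z1 + z4, CONST_BITS);  /* data1 */
    }

The MMX and SSE2 listings above restate these same equations as comments before the corresponding vector code.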
