    Searched full:tmp4 (Results 751 - 775 of 1043) sorted by null


  /external/llvm/test/Transforms/LoopStrengthReduce/
addrec-gep.ll 31 %tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
36 %z1 = add i64 %tmp4, 5203
  /external/llvm/test/Transforms/PGOProfile/
icp_invoke.ll 41 %tmp4 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
42 %matches = icmp eq i32 %tmp3, %tmp4
  /external/llvm/test/Transforms/SimplifyCFG/
basictest.ll 110 %tmp4 = icmp eq i8 %tmp3, 1
111 br i1 %tmp4, label %bb2, label %bb3
iterative-simplify.ll 26 %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
27 %tmp5 = icmp sgt i32 %tmp4, 262144 ; <i1> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
2010-05-20-NEONSpillCrash.ll 42 %tmp4 = sub <8 x i8> %tmp4efgh, %tmp4abcd ; <<8 x i8>> [#uses=1]
44 ret <8 x i8> %tmp4
vqdmul.ll 206 %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
207 ret <4 x i32> %tmp4
216 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
217 ret <2 x i64> %tmp4
247 %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
248 ret <4 x i32> %tmp4
257 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
258 ret <2 x i64> %tmp4
  /external/swiftshader/third_party/LLVM/test/CodeGen/Alpha/
jmp_table.ll 53 %tmp4 = getelementptr [2 x i8]* @str2, i32 0, i64 0 ; <i8*> [#uses=1]
54 store i8* %tmp4, i8** %foo
  /external/swiftshader/third_party/LLVM/test/CodeGen/Mips/
alloca.ll 18 %tmp4 = alloca i8, i32 %size, align 4
21 %call7 = call i32 @foo(i8* %tmp4) nounwind
  /external/swiftshader/third_party/LLVM/test/CodeGen/Thumb2/
2010-02-11-phi-cycle.ll 20 %tmp4 = zext i32 %1 to i64 ; <i64> [#uses=1]
22 %ins = or i64 %tmp4, %mask ; <i64> [#uses=2]
  /external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2006-11-12-CSRetCC.ll 27 %tmp4 = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
28 %tmp5 = load double* %tmp4 ; <double> [#uses=1]
2007-11-30-LoadFolding-Bug.ll 42 %tmp4.i19.i = icmp slt i32 %tmp1.i18.i, %radix ; <i1> [#uses=1]
43 %x.0.i21.i = select i1 %tmp4.i19.i, i32 %tmp1.i18.i, i32 0 ; <i32> [#uses=1]
2008-02-22-LocalRegAllocBug.ll 20 %tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
21 %tmp5 = getelementptr i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
2010-04-06-SSEDomainFixCrash.ll 44 %tmp4.i = insertelement <4 x float> %tmp8.i, float %4, i32 1 ; <<4 x float>> [#uses=1]
45 %tmp.i = insertelement <4 x float> %tmp4.i, float %5, i32 2 ; <<4 x float>> [#uses=1]
loop-strength-reduce4.ll 30 %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
33 %tmp8 = xor i32 %tmp7, %tmp4 ; <i32> [#uses=2]
lsr-normalization.ll 21 %tmp4 = getelementptr inbounds %0* %tmp, i64 0, i32 1 ; <%0**> [#uses=1]
22 store %0* %tmp, %0** %tmp4
muloti.ll 14 %tmp4 = shl nuw i128 %tmp3, 64
15 %ins = or i128 %tmp4, %tmp6
x86-64-sret-return.ll 19 %tmp4 = getelementptr [4 x i64]* %tmp2, i32 0, i32 0 ; <i64*> [#uses=1]
20 %tmp5 = load i64* %tmp4, align 8 ; <i64> [#uses=1]
xor.ll 28 %tmp4 = lshr i32 %tmp3, 1
29 ret i32 %tmp4
  /external/swiftshader/third_party/LLVM/test/Transforms/IndVarSimplify/
addrec-gep.ll 27 %tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
32 %z1 = add i64 %tmp4, 5203
  /external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/
vec_shuffle.ll 67 %tmp4 = extractelement <4 x float> %tmp, i32 1
70 %tmp128 = insertelement <4 x float> undef, float %tmp4, i32 0
  /external/swiftshader/third_party/LLVM/test/Transforms/ScalarRepl/
2009-12-11-NeonTypes.ll 78 %tmp4 = bitcast %struct._NSRange* %range to i8*
79 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp3, i8* %tmp4, i32 8, i32 8, i1 false)
debuginfo-preserved.ll 31 %tmp4 = load i32* %b.addr, align 4, !dbg !15
32 %sub = sub nsw i32 %tmp3, %tmp4, !dbg !15
  /external/swiftshader/third_party/LLVM/test/Transforms/SimplifyCFG/
iterative-simplify.ll 26 %tmp4 = load i32* %i ; <i32> [#uses=1]
27 %tmp5 = icmp sgt i32 %tmp4, 262144 ; <i1> [#uses=1]
  /external/libjpeg-turbo/simd/
jchuff-sse2-64.asm 265 pcmpeqw xmm4, xmm8 ; tmp4 = _mm_cmpeq_epi16(tmp4, zero);
271 packsswb xmm4, xmm5 ; tmp4 = _mm_packs_epi16(tmp4, tmp5);
275 pmovmskb r13d, xmm4 ; index = ((uint64_t)_mm_movemask_epi8(tmp4)) << 32;
  /external/llvm/test/Transforms/SLPVectorizer/X86/
fround.ll 78 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP2]])
80 ; SSE41-NEXT: store <2 x double> [[TMP4]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2) to <2 x double>*), align 8
136 ; SSE41-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 6) to <2 x double>*), align 8
140 ; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP4]])
151 ; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP2]])
153 ; AVX1-NEXT: store <4 x double> [[TMP4]], <4 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4) to <4 x double>*), align 8
160 ; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP2]])
162 ; AVX2-NEXT: store <4 x double> [[TMP4]], <4 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4) to <4 x double>*), align 8
249 ; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP2]])
251 ; SSE41-NEXT: store <2 x double> [[TMP4]], <2 x double>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 (…)
