    Searched full:tmp4 (Results 351 - 375 of 1043)


  /external/llvm/test/Transforms/GlobalDCE/
complex-constantexpr.ll 27 %tmp4 = xor i32 %tmp3, zext (i1 icmp ne (i64 ptrtoint (i32* @global5 to i64), i64 1) to i32)
28 store i32 %tmp4, i32* @global5, align 4
80 %tmp4 = icmp sgt i32 %tmp3, 0
81 %tmp5 = zext i1 %tmp4 to i32
  /external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vlddup.ll 73 %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
74 %tmp5 = add <8 x i8> %tmp2, %tmp4
87 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
88 %tmp5 = add <4 x i16> %tmp2, %tmp4
101 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
102 %tmp5 = add <4 x i16> %tmp2, %tmp4
116 %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
117 %tmp5 = add <2 x i32> %tmp2, %tmp4
137 %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
140 %tmp7 = add <8 x i8> %tmp2, %tmp4
  /external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/
2007-09-08-unaligned.ll 24 %tmp4 = load double* %tmp3, align 1 ; <double> [#uses=1]
26 store double %tmp4, double* %tmp5, align 1
42 %tmp4 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
43 %tmp5 = load double* %tmp4, align 1 ; <double> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/CodeGen/X86/
vec_shuffle.ll 10 %tmp4 = insertelement <4 x float> %tmp2, float %Y, i32 2 ; <<4 x float>> [#uses=1]
11 %tmp6 = insertelement <4 x float> %tmp4, float %Y, i32 3 ; <<4 x float>> [#uses=1]
30 %tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 6 ; <i16> [#uses=1]
38 %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 4 ; <<8 x i16>> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/Transforms/GlobalOpt/
2008-02-16-NestAttr.ll 15 %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
16 %tmp7 = icmp eq i32 %tmp4, %m ; <i1> [#uses=1]
23 %tmp4 = tail call fastcc i32 @g.1478( %struct.FRAME.nest* nest %CHAIN.2, i32 %m ) nounwind ; <i32> [#uses=1]
26 %tmp9 = icmp eq i32 %tmp4, %tmp7 ; <i1> [#uses=1]
  /external/llvm/test/Analysis/BasicAA/
full-store-partial-alias.ll 25 %tmp4 = bitcast %union.anon* %u to [2 x i32]*
26 %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
  /external/llvm/test/CodeGen/AArch64/
arm64-simd-scalar-to-vector.ll 17 %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <16 x i32> zeroinitializer
18 ret <16 x i8> %tmp4
arm64-vext.ll 66 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
67 %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
87 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
88 %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
108 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
109 %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
129 %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
130 %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
150 %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
151 %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
  /external/llvm/test/CodeGen/AMDGPU/
convergent-inlineasm.ll 16 %tmp4 = getelementptr i64, i64 addrspace(1)* %arg, i32 %tmp
36 %tmp4 = getelementptr i64, i64 addrspace(1)* %arg, i32 %tmp
promote-alloca-volatile.ll 38 %tmp4 = load double, double* %tmp, align 8
41 store double %tmp4, double addrspace(1)* %arg
  /external/llvm/test/CodeGen/ARM/
zextload_demandedbits.ll 23 %tmp4 = bitcast i16* %tmp1 to i8*
28 %tmp15 = tail call i32 @widget(%struct.barney* %tmp14, i8* %tmp4, i32 %tmp7)
  /external/llvm/test/CodeGen/PowerPC/
2007-04-30-InlineAsmEarlyClobber.ll 24 %tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
28 %tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
buildvec_canonicalize.ll 6 %tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
7 store <4 x float> %tmp4, <4 x float>* %P3
return-val-i128.ll 16 %tmp4 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
17 %tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
rlwimi3.ll 8 %tmp4 = and i32 %srcA, 31775 ; <i32> [#uses=1]
9 %tmp5 = or i32 %tmp2, %tmp4 ; <i32> [#uses=1]
vcmp-fold.ll 11 %tmp4 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
13 %tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]
vec_veqv_vnand_vorc.ll 26 %tmp4 = or <4 x i32> %tmp3, %x
28 ret <4 x i32> %tmp4
  /external/llvm/test/CodeGen/X86/
2006-05-02-InstrSched1.ll 18 %tmp4 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
22 %tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
2006-05-08-InstrSched.ll 13 %tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
15 %tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]
2007-11-06-InstrSched.ll 12 %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
13 %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
2010-04-08-CoalescerBug.ll 19 %tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
21 %tmp6 = bitcast i32* %tmp4 to i8*
avoid-loop-align.ll 33 %tmp4 = sub i32 %tmp3, %tmp ; <i32> [#uses=1]
34 %tmp5 = getelementptr [100 x i32], [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
byval2.ll 38 %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
39 store i64 %c, i64* %tmp4, align 16
call-push.ll 15 %tmp4 = load i32, i32* %tmp23 ; <i32> [#uses=1]
16 %tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
codegen-prepare-cast.ll 21 %tmp4 = getelementptr i8, i8* %tmp, i32 undef ; <i8*> [#uses=1]
22 %tmp5 = load i8, i8* %tmp4 ; <i8> [#uses=0]

