    Searched full:tmp1 (Results 276 - 300 of 1307)


  /external/llvm/test/CodeGen/AArch64/
arm64-dead-def-frame-index.ll
    8: %tmp1 = alloca i8
    10: %tmp3 = icmp eq i8* %tmp1, null
arm64-extend.ll
    12: %tmp1 = load i32, i32* %arrayidx, align 4
    13: %conv = sext i32 %tmp1 to i64
arm64-extload-knownzero.ll
    7: %tmp1 = icmp ult i32 %a, 100
    8: br i1 %tmp1, label %bb1, label %bb2
arm64-redzone.ll
    13: %tmp1 = load i32, i32* %b.addr, align 4
    14: %add = add nsw i32 %tmp, %tmp1
  /external/llvm/test/CodeGen/AMDGPU/
insert_subreg.ll
    12: %tmp1 = ptrtoint [16 x i32]* %tmp0 to i32
    13: %tmp2 = sext i32 %tmp1 to i64
reorder-stores.ll
    11: %tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
    14: store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
    25: %tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
    28: store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
    45: %tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
    48: store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
    60: %tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
    62: %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
rotr.i64.ll
    12: %tmp1 = shl i64 %x, %tmp0
    14: %tmp3 = or i64 %tmp1, %tmp2
    32: %tmp1 = shl i64 %x, %tmp0
    34: %tmp3 = or i64 %tmp1, %tmp2
    43: %tmp1 = shl <2 x i64> %x, %tmp0
    45: %tmp3 = or <2 x i64> %tmp1, %tmp2
    56: %tmp1 = shl <2 x i64> %x, %tmp0
    58: %tmp3 = or <2 x i64> %tmp1, %tmp2
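The rotr.i64.ll hits all have one shape: a shl feeding an or, i.e. the rotate-right idiom rotr(x, y) = (x >> y) | (x << (bits - y)). A minimal sketch of the scalar form these excerpts come from; the function name and the definitions of %tmp0 and %tmp2 are reconstructions, since the search shows only the shl and or lines:

    define i64 @rotr_i64(i64 %x, i64 %y) {
    entry:
      ; complementary shift amount (reconstructed, not shown in the hits)
      %tmp0 = sub i64 64, %y
      ; the two matched lines: x << (64 - y) combined with x >> y
      %tmp1 = shl i64 %x, %tmp0
      %tmp2 = lshr i64 %x, %y
      %tmp3 = or i64 %tmp1, %tmp2
      ret i64 %tmp3
    }

The <2 x i64> hits at file lines 43-58 are the same pattern vectorized.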
  /external/llvm/test/CodeGen/ARM/
atomic-cmp.ll
    16: %tmp1 = extractvalue { i8, i1 } %tmp0, 0
    17: ret i8 %tmp1
extload-knownzero.ll
    7: %tmp1 = icmp ult i32 %a, 100
    8: br i1 %tmp1, label %bb1, label %bb2
ifcvt6.ll
    7: %tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
    9: %tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
v1-constant-fold.ll
    9: %tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 1
    10: %tmp2 = insertelement <4 x i32> %tmp1, i32 0, i32 2
vshiftins.ll
    6: %tmp1 = load <8 x i8>, <8 x i8>* %A
    8: %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
    15: %tmp1 = load <4 x i16>, <4 x i16>* %A
    17: %tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
    24: %tmp1 = load <2 x i32>, <2 x i32>* %A
    26: %tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
    33: %tmp1 = load <1 x i64>, <1 x i64>* %A
    35: %tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
    42: %tmp1 = load <16 x i8>, <16 x i8>* %A
    44: %tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 (…)
    [all...]
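Each vshiftins.ll hit is one element width of the same test: two vectors are loaded and passed to the @llvm.arm.neon.vshiftins intrinsic, whose third operand is a constant shift-amount vector, and which lowers to the NEON shift-and-insert instructions (VSLI/VSRI). A self-contained sketch of the <8 x i8> case, reconstructed around the two matched lines; the function name and the %tmp2 load are assumptions:

    declare <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)

    define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) {
      %tmp1 = load <8 x i8>, <8 x i8>* %A
      ; second operand, elided by the excerpt (assumption)
      %tmp2 = load <8 x i8>, <8 x i8>* %B
      ; shift each lane of %tmp2 left by 7 and insert into %tmp1
      %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>)
      ret <8 x i8> %tmp3
    }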
  /external/llvm/test/CodeGen/Mips/
mipslopat.ll
    12: %tmp1 = load i32*, i32** @stat_ptr_vol_int, align 4
    13: %1 = bitcast i32* %tmp1 to i8*
  /external/llvm/test/CodeGen/PowerPC/
2006-07-07-ComputeMaskedBits.ll
    8: %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
    9: %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
2006-10-17-brcc-miscompile.ll
    8: %tmp1 = and i32 %X, 3 ; <i32> [#uses=1]
    9: %tmp2 = xor i32 %tmp1, 1 ; <i32> [#uses=1]
2008-06-23-LiveVariablesCrash.ll
    9: %tmp1 = load i8, i8* null, align 1
    10: %tmp2 = icmp eq i8 %tmp1, 0
frounds.ll
    8: %tmp1 = call i32 @llvm.flt.rounds( ) ; <i32> [#uses=1]
    9: store i32 %tmp1, i32* %tmp, align 4
  /external/llvm/test/CodeGen/Thumb2/
thumb2-cmn.ll
    41: %tmp1 = icmp eq i32 %nb, %a
    42: ret i1 %tmp1
    50: %tmp1 = icmp ne i32 %nb, %a
    51: ret i1 %tmp1
    59: %tmp1 = icmp eq i32 %a, %nb
    60: ret i1 %tmp1
    70: %tmp1 = icmp ne i32 %a, %nb
    71: ret i1 %tmp1
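The four thumb2-cmn.ll hits differ only in the predicate (eq/ne) and operand order; each compares %a against %nb, a negated operand, so the comparison can lower to Thumb-2 CMN (compare negative, which sets flags on a + b instead of a - b). A sketch of one case, assuming %nb = 0 - %b as the excerpts' naming suggests:

    define i1 @cmn_eq(i32 %a, i32 %b) {
      ; negate %b; "icmp eq -%b, %a" can then select "cmn %a, %b"
      %nb = sub i32 0, %b
      %tmp1 = icmp eq i32 %nb, %a
      ret i1 %tmp1
    }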
  /external/llvm/test/CodeGen/X86/
2007-01-08-InstrSched.ll
    5: %tmp1 = fmul float %x, 3.000000e+00
    9: %tmp10 = fadd float %tmp1, %tmp3
2007-06-28-X86-64-isel.ll
    4: %tmp1 = call <8 x i16> @llvm.x86.sse2.pmins.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 7, i32 7, i32 7, i32 7 > to <8 x i16>) )
    5: %tmp2 = bitcast <8 x i16> %tmp1 to <4 x i32>
2007-11-04-LiveVariablesBug.ll
    9: %tmp1 = lshr i64 %bytes, 8 ; <i64> [#uses=1]
    10: %tmp12 = trunc i64 %tmp1 to i32 ; <i32> [#uses=2]
atom-lea-addw-bug.ll
    8: %tmp1 = load i16, i16* undef, align 2
    12: %add55 = add i16 %tmp17, %tmp1
avx1-logical-load-folding.ll
    8: %tmp1 = bitcast float* %A to <8 x float>*
    9: %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
    22: %tmp1 = bitcast float* %A to <8 x float>*
    23: %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
    36: %tmp1 = bitcast float* %A to <8 x float>*
    37: %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
    49: %tmp1 = bitcast float* %A to <8 x float>*
    50: %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
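avx1-logical-load-folding.ll repeats the same bitcast-then-load prologue four times, once per logical op whose 32-byte memory operand should fold. A sketch of one function built around the matched lines; everything after the load (the mask constant, the extract, and the store) is an assumed continuation not shown by the search:

    define void @test_and(float* %A, float* %C) {
      %tmp1 = bitcast float* %A to <8 x float>*
      %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
      ; illustrative logical op on the loaded vector (assumption)
      %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
      %tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
      %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
      %tmp6 = extractelement <8 x float> %tmp5, i32 0
      store float %tmp6, float* %C
      ret void
    }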
bc-extract.ll
    8: %tmp1 = extractelement <2 x float> %tmp0, i32 0
    9: ret float %tmp1
inline-asm-x-scalar.ll
    19: %tmp1 = tail call float asm "", "=x,0,~{dirflag},~{fpsr},~{flags}"( float 0x47EFFFFFE0000000 ); <float> [#uses=1]
    20: %tmp4 = fsub float %tmp1, 0x3810000000000000 ; <float> [#uses=1]

