Searched full:tmp4 (Results 376 - 400 of 1043)

  /external/llvm/test/CodeGen/X86/
div8.ll 20 %tmp4 = load i8, i8* %quotient, align 1
21 ret i8 %tmp4
stride-nine-with-base-reg.ll 22 %tmp4 = mul i8 %tmp3, 2
24 store i8 %tmp4, i8* %tmp5, align 4
stride-reuse.ll 18 %tmp4 = fmul float %tmp3, 2.000000e+00
20 store float %tmp4, float* %tmp5, align 4
tailcallbyval64.ll 40 %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
41 ret i64 %tmp4
use-add-flags.ll 17 %tmp4 = add i32 %tmp2, %y ; <i32> [#uses=1]
18 %tmp5 = icmp slt i32 %tmp4, 0 ; <i1> [#uses=1]
  /external/llvm/test/Feature/
aliases.ll 38 %tmp4 = call %FunTy @bar_f()
39 %tmp5 = add i32 %tmp3, %tmp4
ppcld.ll 19 %tmp4 = fpext double %tmp3 to ppc_fp128 ; <ppc_fp128> [#uses=1]
20 store ppc_fp128 %tmp4, ppc_fp128* @ld
x86ld.ll 19 %tmp4 = fpext double %tmp3 to x86_fp80 ; <x86_fp80> [#uses=1]
20 store x86_fp80 %tmp4, x86_fp80* @ld
  /external/llvm/test/Transforms/ArgumentPromotion/
byval.ll 23 %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
24 store i64 2, i64* %tmp4, align 4
  /external/llvm/test/Transforms/IndVarSimplify/
loop_evaluate11.ll 25 %tmp4 = add i32 %order_start.0, 2 ; <i32> [#uses=1]
26 %tmp5 = add i32 %tmp4, undef ; <i32> [#uses=1]
  /external/llvm/test/Transforms/InstCombine/
2008-04-29-VolatileLoadDontMerge.ll 15 %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
16 store volatile i32 %tmp4, i32* @g_1, align 4
2008-07-08-VolatileLoadMerge.ll 16 %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
17 store volatile i32 %tmp4, i32* @g_1, align 4
2010-11-01-lshr-mask.ll 32 %tmp4 = and i8 %arg1, 33
35 %tmp7 = or i8 %tmp4, %tmp6
bitcast-bigendian.ll 16 %tmp4 = bitcast i32 %tmp2 to float ; <float> [#uses=1]
18 %add = fadd float %tmp24, %tmp4
24 ; CHECK-NEXT: %tmp4 = extractelement <2 x float> {{.*}}, i32 1
25 ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
38 %tmp4 = bitcast i32 %tmp2 to float
40 %add = fadd float %tmp24, %tmp4
46 ; CHECK-NEXT: %tmp4 = extractelement <4 x float> {{.*}}, i32 1
47 ; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
neon-intrinsics.ll 18 %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
20 call void @llvm.arm.neon.vst4.p0i8.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
  /external/llvm/test/Transforms/LoopUnroll/
shifted-tripcount.ll 19 %tmp4 = load double, double* %arrayidx ; <double> [#uses=1]
21 %mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
  /external/llvm/test/Transforms/LoopVectorize/X86/
reduction-crash.ll 21 %tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
26 %tmp9 = fadd fast double %tmp4, undef
  /external/llvm/test/Transforms/LoopVectorize/
phi-hang.ll 39 %tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
40 %tmp4 = or i32 %tmp2, %tmp3
  /external/llvm/test/Transforms/ObjCARC/
contract-storestrong-ivar.ll 25 %tmp4 = tail call i8* @objc_retain(i8* %tmp3) nounwind
28 %tmp6 = bitcast i8* %tmp4 to %1*
  /external/llvm/test/Transforms/SLPVectorizer/X86/
pr23510.ll 27 %tmp4 = load i64, i64* %a, align 8
28 %shr5 = lshr i64 %tmp4, 4
  /external/llvm/test/Transforms/StraightLineStrengthReduce/AMDGPU/
reassociate-geps-and-slsr-addrspace.ll 17 %tmp4 = bitcast float addrspace(1)* %out to i32 addrspace(1)*
18 store i32 %v11, i32 addrspace(1)* %tmp4, align 4
44 %tmp4 = bitcast float addrspace(1)* %out to i32 addrspace(1)*
45 store i32 %v11, i32 addrspace(1)* %tmp4, align 4
71 %tmp4 = bitcast float addrspace(1)* %out to i32 addrspace(1)*
72 store i32 %v11, i32 addrspace(1)* %tmp4, align 4
96 %tmp4 = bitcast float addrspace(1)* %out to i32 addrspace(1)*
97 store i32 %v11, i32 addrspace(1)* %tmp4, align 4
  /external/swiftshader/third_party/LLVM/test/Analysis/BasicAA/
full-store-partial-alias.ll 23 %tmp4 = bitcast %union.anon* %u to [2 x i32]*
24 %arrayidx = getelementptr inbounds [2 x i32]* %tmp4, i64 0, i64 %idxprom
pure-const-dce.ll 13 %tmp4 = call i32 @TestConst( i32 5 ) readnone ; <i32> [#uses=1]
21 %sum3 = add i32 %sum2, %tmp4 ; <i32> [#uses=1]
  /external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
fast-isel-static.ll 15 %tmp4 = load float** %sum.addr, align 4
16 store float %add, float* %tmp4
  /external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/
2007-04-30-InlineAsmEarlyClobber.ll 24 %tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
28 %tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
