    Searched full:tail (Results 326 - 350 of 3492)
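Most of the hits below are the "tail" marker on LLVM IR call instructions in CodeGen and Transforms regression tests (plus a few C++ variadic-template uses of the identifier Tail). As a rough sketch of what a full match looks like in context, a hypothetical test function using the marker might read (this is illustrative only, not one of the files listed):

    declare i32 @callee(i32)

    define i32 @caller(i32 %x) {
    entry:
      ; 'tail' marks this call as eligible for tail-call optimization
      %r = tail call i32 @callee(i32 %x)
      ret i32 %r
    }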


  /external/llvm/test/CodeGen/X86/
2008-08-17-UComiCodeGenBug.ll 5 tail call i32 @llvm.x86.sse.ucomige.ss( <4 x float> %a, <4 x float> %b ) nounwind readnone
2010-02-01-TaillCallCrash.ll 10 %0 = tail call i32 bitcast (%"char[]"* @.str to i32 (i32)*)(i32 0) nounwind ; <i32> [#uses=1]
fp-stack-retcopy.ll 9 %tmp5 = tail call double @foo() nounwind ; <double> [#uses=1]
h-registers-3.ll 6 %0 = tail call zeroext i16 (...) @bar() nounwind
inline-asm-modifier-q.ll 10 tail call void asm sideeffect "movq (${0:q}, %ebx, 4), %mm0", "r,~{dirflag},~{fpsr},~{flags}"(i32* %p)
inlineasm-sched-bug.ll 10 %0 = tail call i32 asm "bsfl $1,$0\0A\09", "=r,rm,~{dirflag},~{fpsr},~{flags}"(i32 %and) nounwind
ins_split_regalloc.ll 21 ; Last call is a tail call, thus the address of the function cannot use
29 tail call void %fct_f(i32 %a)
30 tail call void %fct_f(i32 %b)
31 tail call void %fct_f(i32 %c)
powi.ll 6 %0 = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
tbm-intrinsics-x86_64.ll 8 %0 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %a, i32 2814)
20 %0 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %tmp1, i32 2814)
29 %0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %a, i64 2814)
41 %0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %tmp1, i64 2814)
vec_shift5.ll 8 %1 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> <i16 1, i16 2, i16 4, i16 8, i16 1, i16 2, i16 4, i16 8>, i32 3)
17 %1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
26 %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
35 %1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 1, i32 2, i32 4, i32 8>, i32 3)
44 %1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
53 %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
62 %1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 1, i64 2>, i32 3)
71 %1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 8, i64 16>, i32 3)
80 %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
89 %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3
    [all...]
  /external/llvm/test/Transforms/CorrelatedValuePropagation/
icmp.ll 23 tail call void @check1(i1 %tmp47) #4
28 tail call void @check2(i1 %tmp48) #4
54 tail call void @check1(i1 %tmp47) #0
59 tail call void @check2(i1 %tmp48) #4
  /external/llvm/test/Transforms/FunctionAttrs/
noreturn.ll 15 tail call void @endless_loop()
  /external/llvm/test/Transforms/Inline/
inline-optnone.ll 24 %0 = tail call i32 @alwaysInlineFunction(i32 %a)
25 %1 = tail call i32 @simpleFunction(i32 %a)
38 %0 = tail call i32 @OptnoneFunction(i32 5)
39 %1 = tail call i32 @simpleFunction(i32 6)
  /external/llvm/test/Transforms/InstSimplify/
call-callconv.ll 8 %call = tail call arm_aapcscc i32 @abs(i32 %i) nounwind readnone
20 %call = tail call arm_aapcscc i32 @labs(i32 %i) nounwind readnone
32 %call = tail call arm_aapcscc i32 @strlen(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0))
41 %call = tail call arm_aapcscc i32 @strlen(i8* %str)
  /external/llvm/test/Transforms/LoadCombine/
load-combine-assume.ll 12 ; CHECK-DAG: tail call void @llvm.assume(i1 %b)
19 tail call void @llvm.assume(i1 %b)
31 ; CHECK-DAG: tail call void @llvm.assume(i1 %b)
37 tail call void @llvm.assume(i1 %b)
  /external/llvm/test/Transforms/ObjCARC/
post-inlining.ll 17 %0 = tail call i8* @objc_retain(i8* %call.i) nounwind
18 %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %0) nounwind
30 %0 = tail call i8* @objc_retain(i8* %call.i) nounwind
31 %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %call.i) nounwind
  /external/llvm/test/Transforms/InstCombine/
bswap-fold.ll 16 %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
25 %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
36 %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
45 %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
46 %tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
55 %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
66 %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
68 %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
77 %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
79 %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwin
    [all...]
  /external/clang/test/CXX/temp/temp.fct.spec/temp.arg.explicit/
p3-0x.cpp 7 template<typename Head, typename ...Tail>
8 struct count<Head, Tail...> {
9 static const unsigned value = 1 + count<Tail...>::value;
  /external/llvm/test/CodeGen/AArch64/
arm64-dead-register-def-bug.ll 16 %tmp1 = tail call float @ceilf(float 2.000000e+00)
21 tail call void @foo()
26 tail call void @bar(i32 %tmp3)
arm64-vclz.ll 7 %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
15 %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
23 %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
31 %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
39 %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
47 %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
55 %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
63 %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
71 %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwind
79 %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwin
    [all...]
regress-tail-livereg.ll 11 ; which makes it a natural choice for the tail call itself. But we don't
16 tail call void %func()
29 tail call void %faddr()
tailcall-mem-intrinsics.ll 7 tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
15 tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
23 tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
  /external/llvm/test/CodeGen/ARM/
2010-11-29-PrologueBug.ll 22 %0 = tail call i32* @foo(i32* %x) nounwind
23 %1 = tail call i32* @foo(i32* %0) nounwind
24 %2 = tail call i32* @foo(i32* %1) nounwind
bswap16.ll 8 %1 = tail call i16 @llvm.bswap.i16(i16 %0)
21 %0 = tail call i16 @llvm.bswap.i16(i16 %in)
34 %1 = tail call i16 @llvm.bswap.i16(i16 %0)
inlineasm-imm-arm.ll 11 tail call void asm sideeffect ".word $0", "J"( i32 4080 ) nounwind
17 tail call void asm sideeffect ".word $0", "K"( i32 16777215 ) nounwind
23 tail call void asm sideeffect ".word $0", "L"( i32 -65280 ) nounwind

