/external/llvm/test/CodeGen/AArch64/
  arm64-vcvt_n.ll
      7  %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 9)
     15  %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 12)
     23  %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %a, i32 18)
     31  %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %a, i32 30)
     35  %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
     40  %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
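These intrinsics convert fixed-point integer vectors to floating point; the trailing immediate is the number of fractional bits. A minimal C sketch of the same conversion, assuming Clang with arm_neon.h on an AArch64 target (the function name is illustrative):

    #include <arm_neon.h>

    /* 9 fractional bits: each unsigned lane becomes (float)lane / 512.0f,
       i.e. the vcvtfxu2fp ... i32 9 pattern matched above */
    float32x2_t fixed_q9_to_float(uint32x2_t a) {
        return vcvt_n_f32_u32(a, 9);
    }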
/external/llvm/test/CodeGen/ARM/
  prefetch.ll
     22  tail call void @llvm.prefetch( i8* %ptr, i32 1, i32 3, i32 1 )
     23  tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 1 )
     35  tail call void @llvm.prefetch( i8* %tmp, i32 0, i32 3, i32 1 )
     50  tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3, i32 1 )
     64  tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3, i32 1 )
     77  tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 0 )
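llvm.prefetch takes (address, rw, locality, cache type); the matches above are mostly reads (rw=0) with maximal locality (3) against the data cache (the final i32 1), plus one instruction prefetch (i32 0). In C this IR typically comes from __builtin_prefetch; a sketch with an illustrative prefetch distance:

    void scan(const int *p, int n) {
        for (int i = 0; i < n; i++) {
            /* rw=0 (read), locality=3 -> llvm.prefetch(ptr, 0, 3, 1) */
            __builtin_prefetch(&p[i + 16], 0, 3);
            /* ... consume p[i] here ... */
        }
    }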
  arm-ttype-target2.ll
     16  %2 = tail call i32 @llvm.eh.typeid.for(i8* bitcast ({ i8*, i8* }* @_ZTI3Foo to i8*)) nounwind
     24  %4 = tail call i8* @__cxa_begin_catch(i8* %3) nounwind
     25  tail call void @__cxa_end_catch()
  fp16.ll
     16  %2 = tail call float @llvm.convert.from.fp16(i16 %0)
     19  %3 = tail call float @llvm.convert.from.fp16(i16 %1)
     23  %5 = tail call i16 @llvm.convert.to.fp16(float %4)
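These are the legacy half-precision conversion intrinsics. Assuming Clang on an ARM target, where __fp16 is a storage-only type, accesses like the following produce them (newer IR uses fpext/fptrunc instead):

    float load_half(const __fp16 *p) {
        return *p;             /* half -> float widening read */
    }

    void store_half(__fp16 *p, float f) {
        *p = (__fp16)f;        /* float -> half narrowing write */
    }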
/external/llvm/test/CodeGen/Hexagon/
  BranchPredict.ll
     19  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
     23  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
     41  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
     45  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
     62  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
     67  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add1) nounwind
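@foobar is declared without a prototype (i32 (...)), so every call site goes through a bitcast, and the branches around the calls carry the profile weights the test checks. A hedged C sketch of how such IR can arise (the names and the unlikely-hint are illustrative):

    int foobar();   /* no prototype: old IR calls it through a bitcast */

    int f(int a) {
        if (__builtin_expect(a < 0, 0))   /* mark this branch unlikely */
            return foobar(4);
        return foobar(a + 2);
    }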
  combine_ir.ll
      9  tail call void @bar(i64 %1) nounwind
     27  tail call void @bar(i64 %ins) nounwind
     43  tail call void @bar(i64 %ins) nounwind
/external/llvm/test/CodeGen/Mips/
  2008-08-01-AsmInline.ll
     11  %asmtmp = tail call %struct.DWstruct asm "multu $2,$3", "={lo},={hi},d,d"( i32 %u, i32 %v ) nounwind
     31  %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
     40  %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
     49  %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
     65  %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
     68  %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
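These are GCC-style extended asm statements. The match at line 31, for instance, corresponds to the following C, assuming a MIPS target (the function name is illustrative):

    int add_asm(int a, int b) {
        int r;
        /* "=r" output and two "r" inputs: the "=r,r,r" constraints above */
        __asm__("addu %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
        return r;
    }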
  call-optimization.ll
     37  tail call void @callee3()
     38  tail call void @callee3()
     45  tail call void @callee3()
     83  %call = tail call double @ceil(double %d)
     84  %call1 = tail call double @ceil(double %call)
     86  %call2 = tail call double @ceil(double %call1)
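The ceil chain at lines 83-86 exercises tail calls to a libm routine. A C sketch; whether the last call is actually emitted as a tail call depends on the target and optimization level:

    #include <math.h>

    double ceil3(double d) {
        /* the outermost ceil is in return position, so at -O2 it can
           become a tail call that reuses the caller's stack frame */
        return ceil(ceil(ceil(d)));
    }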
  i64arg.ll
     13  tail call void @ff1(i32 %i, i64 1085102592623924856) nounwind
     18  tail call void @ff2(i64 %ll, double 3.000000e+00) nounwind
     26  tail call void @ff3(i32 %i, i64 %ll, i32 %sub, i64 %ll1) nounwind
/external/llvm/test/CodeGen/X86/
  sse4a.ll
      6  tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
     15  tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
     24  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
     34  %2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
     43  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
     52  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
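These cover the SSE4a non-temporal scalar stores (movntss/movntsd) and the extrq/insertq bit-field instructions. Assuming a compiler with SSE4a enabled (-msse4a), the movnt.ss match corresponds to:

    #include <ammintrin.h>   /* SSE4a intrinsics */

    void stream_low_float(float *p, __m128 a) {
        _mm_stream_ss(p, a);   /* movntss -> llvm.x86.sse4a.movnt.ss */
    }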
  ctpop-combine.ll
      6  %count = tail call i64 @llvm.ctpop.i64(i64 %x)
     20  %count = tail call i64 @llvm.ctpop.i64(i64 %x)
     32  %count = tail call i64 @llvm.ctpop.i64(i64 %x)
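llvm.ctpop.i64 is the 64-bit population count. In C it comes from the GCC/Clang builtin:

    int popcount64(unsigned long long x) {
        return __builtin_popcountll(x);   /* lowers to llvm.ctpop.i64 */
    }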
  mmx-shift.ll
      7  %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; <x86_mmx> [#uses=1]
     19  %tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; <x86_mmx> [#uses=1]
     31  %tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone ; <x86_mmx> [#uses=1]
  vec_shift3.ll
      7  %tmp3 = tail call <2 x i64> @llvm.x86.sse2.pslli.q( <2 x i64> %x1, i32 %bits ) nounwind readnone ; <<2 x i64>> [#uses=1]
     13  %tmp3 = tail call <2 x i64> @llvm.x86.sse2.pslli.q( <2 x i64> %x1, i32 10 ) nounwind readnone ; <<2 x i64>> [#uses=1]
     20  %tmp4 = tail call <8 x i16> @llvm.x86.sse2.psrai.w( <8 x i16> %tmp2, i32 %bits ) nounwind readnone ; <<8 x i16>> [#uses=1]
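Together these two tests cover immediate and variable counts for the MMX and SSE2 shift intrinsics. A C sketch of the SSE2 cases, assuming SSE2 is enabled (function names are illustrative):

    #include <emmintrin.h>

    __m128i shl64_by_10(__m128i x) {
        return _mm_slli_epi64(x, 10);     /* llvm.x86.sse2.pslli.q */
    }

    __m128i sar16(__m128i x, int bits) {
        return _mm_srai_epi16(x, bits);   /* llvm.x86.sse2.psrai.w */
    }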
  x86-upgrade-avx-vbroadcast.ll
     15  %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
     24  %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
     33  %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
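The llvm.x86.avx.vbroadcast intrinsics were later dropped in favor of a scalar load plus shufflevector, which is what this auto-upgrade test verifies. The C-level intrinsic is unchanged; assuming AVX (-mavx):

    #include <immintrin.h>

    __m256 splat8(const float *p) {
        /* old IR: llvm.x86.avx.vbroadcast.ss.256; new IR: load + shuffle */
        return _mm256_broadcast_ss(p);
    }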
/external/llvm/test/Transforms/GVN/
  2011-07-07-MatchIntrinsicExtract.ll
      8  %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
     20  %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
     32  %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
     44  %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
     56  %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
     68  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
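Each *.with.overflow intrinsic returns the arithmetic result and the overflow flag as a pair, which GVN can match against separate add/cmp instructions. In C, assuming GCC 5+ or Clang, the unsigned-add case looks like:

    #include <stdbool.h>

    bool checked_add(unsigned long long a, unsigned long long b,
                     unsigned long long *sum) {
        /* one llvm.uadd.with.overflow.i64 call: sum plus carry bit */
        return __builtin_uaddll_overflow(a, b, sum);
    }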
/external/llvm/test/Transforms/IndVarSimplify/
  dont-recompute.ll
     33  ; CHECK: tail call void @func(i32 %add)
     34  tail call void @func(i32 %add)
     43  ; CHECK-NEXT: tail call void @func(i32 %add.lcssa)
     44  tail call void @func(i32 %add)
     57  ; CHECK: tail call void @func(i32 %add)
     58  tail call void @func(i32 %add)
/external/mesa3d/src/gallium/auxiliary/util/
  u_ringbuffer.c
     18  unsigned tail; member in struct:util_ringbuffer
     61  return (ring->tail - (ring->head + 1)) & ring->mask;
    135  ring_packet = &ring->buf[ring->tail];
    149  packet[i] = ring->buf[ring->tail];
    150  ring->tail++;
    151  ring->tail &= ring->mask;
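The ring size is a power of two, so indices wrap with a mask rather than a modulo, and one slot stays empty so a full buffer is distinguishable from an empty one (the match at line 61). A minimal C sketch of the same arithmetic (struct and names are illustrative, not the util_ringbuffer API):

    struct ring {
        unsigned *buf;
        unsigned mask;   /* size - 1, with size a power of two */
        unsigned head;   /* write index */
        unsigned tail;   /* read index */
    };

    static unsigned ring_space(const struct ring *r) {
        return (r->tail - (r->head + 1)) & r->mask;   /* free slots */
    }

    static unsigned ring_read(struct ring *r) {
        unsigned v = r->buf[r->tail];
        r->tail = (r->tail + 1) & r->mask;   /* advance and wrap */
        return v;
    }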
/prebuilts/misc/common/swig/include/2.0.11/mzscheme/
  std_vector.i
     58  Scheme_Object *head, *tail;
     60  tail = $input;
     61  while (!SCHEME_NULLP(tail)) {
     62  head = scheme_car(tail);
     63  tail = scheme_cdr(tail);
     91  Scheme_Object *head, *tail;
     92  tail = $input;
     93  while (!SCHEME_NULLP(tail)) {
     94  head = scheme_car(tail);
  [all...]
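The SWIG typemap walks a Scheme list with the classic car/cdr loop: tail starts at the input and advances until it reaches the empty list. A self-contained C sketch using only the calls visible above (assuming the MzScheme embedding header scheme.h):

    #include "scheme.h"

    static void walk_list(Scheme_Object *input) {
        Scheme_Object *head, *tail = input;
        while (!SCHEME_NULLP(tail)) {    /* stop at '() */
            head = scheme_car(tail);     /* current element */
            tail = scheme_cdr(tail);     /* rest of the list */
            (void)head;                  /* convert/validate head here */
        }
    }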
/external/chromium_org/mojo/public/tools/bindings/pylib/mojom/generate/
  template_expander.py
     21  path, tail = os.path.split(path)
     22  assert tail
     23  if tail == dirname:
/external/chromium_org/third_party/markdown/extensions/
  attr_list.py
    118  if len(elem) and elem[-1].tail:
    119  # has children. Get from tail of last child
    120  m = RE.search(elem[-1].tail)
    123  elem[-1].tail = elem[-1].tail[:m.start()]
    126  elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
    137  # inline: check for attrs at start of tail
    138  if elem.tail:
    139  m = self.INLINE_RE.match(elem.tail)
  [all...]
/external/llvm/test/Analysis/TypeBasedAliasAnalysis/
  memcpyopt.ll
      9  ; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %q, i64 16, i32 1, i1 false), !tbaa !0
     13  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %q, i64 16, i32 1, i1 false), !tbaa !2
     15  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %q, i8* %p, i64 16, i32 1, i1 false), !tbaa !2
/external/llvm/test/CodeGen/MSP430/
  2009-12-22-InlineAsm.ll
     11  %0 = tail call i8* asm "", "=r,0"(i8* getelementptr inbounds ([10 x i8]* @buf, i16 0, i16 0)) nounwind ; <i8*> [#uses=1]
     19  tail call void @abort() nounwind
     23  tail call void @exit(i16 0) nounwind
/external/llvm/test/CodeGen/PowerPC/
  inlineasm-copy.ll
      6  %tmp = tail call i32 asm "foo $0", "=r"( ) ; <i32> [#uses=1]
     12  %tmp1 = tail call i32 asm "foo $0, $1", "=r,r"( i32 %X ) ; <i32> [#uses=1]
     19  %tmp1 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm sideeffect "foo $0, $1", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19"( i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y ) ; <i32> [#uses=1]
/external/llvm/test/CodeGen/Thumb/
  2011-06-16-NoGPRs.ll
     19  %call14 = tail call i8* (i8*, i8*, ...)* (i8*, i8*)* @f1(i8* undef, i8* %_cmd) optsize
     21  tail call void %0(i8* %self, i8* %_cmd, %0* %inObjects, %0* %inIndexes) optsize
     22  tail call void bitcast (i8* (i8*, i8*, ...)* @f2 to void (i8*, i8*, i32, %0*, %0*)*)(i8* %self, i8* undef, i32 2, %0* %inIndexes, %0* undef) optsize
  inlineasm-imm-thumb.ll
     11  tail call void asm sideeffect ".word $0", "J"( i32 -255 ) nounwind
     17  tail call void asm sideeffect ".word $0", "K"( i32 65280 ) nounwind
     41  tail call void asm sideeffect "add sp, sp, $0; add sp, sp, $1", "O,O"( i32 -508, i32 508 ) nounwind
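The J, K, and O constraints each accept only a particular immediate range on Thumb, which is what this test pins down. A hedged C equivalent of the first two matches, assuming a Thumb1 target (the accepted ranges are target-specific):

    void emit_words(void) {
        __asm__ volatile(".word %0" : : "J"(-255));    /* small negative imm */
        __asm__ volatile(".word %0" : : "K"(65280));   /* 0xFF00 */
    }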