    Searched full:tail (Results 426 - 450 of 3492)


  /external/mesa3d/src/gallium/drivers/llvmpipe/
lp_scene.h 81 struct cmd_block *tail; member in struct:cmd_block_list
100 struct cmd_block *tail; member in struct:cmd_bin
298 struct cmd_block *tail = bin->tail; local
304 if (tail == NULL || tail->count == CMD_BLOCK_MAX) {
305 tail = lp_scene_new_cmd_block( scene, bin );
306 if (!tail) {
309 assert(tail->count == 0);
313 unsigned i = tail->count
    [all...]
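
The llvmpipe hits above come from code that appends bin commands to the tail block of a chunked list, allocating a fresh block once the current tail is full. A minimal C sketch of that pattern follows; the struct layout and the calloc-based allocator are assumptions made for illustration (the driver's real allocator is lp_scene_new_cmd_block), not a copy of the Mesa code:

    /* Sketch only: a chunked command list where new entries go into the
     * tail block and a new block is allocated when the tail fills up.
     * Field layout and allocator are assumed, not taken from Mesa. */
    #include <assert.h>
    #include <stdlib.h>

    #define CMD_BLOCK_MAX 128

    struct cmd_block {
       unsigned cmd[CMD_BLOCK_MAX];
       unsigned count;                /* entries used in this block */
       struct cmd_block *next;
    };

    struct cmd_bin {
       struct cmd_block *head, *tail;
    };

    /* Append one command, growing the list when the tail block is full. */
    static int bin_add_cmd(struct cmd_bin *bin, unsigned cmd)
    {
       struct cmd_block *tail = bin->tail;

       if (tail == NULL || tail->count == CMD_BLOCK_MAX) {
          tail = calloc(1, sizeof *tail);   /* stand-in for lp_scene_new_cmd_block() */
          if (!tail)
             return 0;
          if (bin->tail)
             bin->tail->next = tail;
          else
             bin->head = tail;
          bin->tail = tail;
          assert(tail->count == 0);
       }

       tail->cmd[tail->count++] = cmd;
       return 1;
    }
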
  /frameworks/rs/driver/runtime/arch/
x86_sse2.ll 18 %1 = tail call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %in, <4 x float> %high) nounwind readnone
19 %2 = tail call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %low) nounwind readnone
27 %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
36 %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
45 %4 = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %3) nounwind readnone
46 %5 = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %4, <4 x float> %2) nounwind readnone
60 %9 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %in, <4 x float> %4, <4 x float> %8) nounwind readnone
71 %7 = tail call <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %in, <3 x float> %3, <3 x float> %6) nounwind readnone
80 %5 = tail call <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %in, <2 x float> %2, <2 x float> %4) nounwind readnone
85 %1 = tail call float @llvm.sqrt.f32(float %in) nounwind readnon
    [all...]
neon.ll 113 %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %value, <4 x float> %high) nounwind readnone
114 %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %low) nounwind readnone
119 %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
120 %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
121 %out = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %_low, <4 x float> %_high) nounwind readonly
129 %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
130 %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
137 %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
138 %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
139 %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind read (…)
    [all...]
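
Both the x86 SSE2 and NEON hits above lower clamp to a min against the high bound followed by a max against the low bound. A scalar C sketch of the same formula (the vector intrinsics in the IR are what those files actually use; this only restates the arithmetic):

    #include <math.h>

    /* clamp(x, low, high) == max(min(x, high), low), matching the
     * min.ps/max.ps and vmins/vmaxs pairs in the IR excerpts above. */
    static float clampf(float x, float low, float high)
    {
        return fmaxf(fminf(x, high), low);
    }
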
  /external/llvm/test/Transforms/ObjCARC/
move-and-form-retain-autorelease.ll 7 ; CHECK: tail call i8* @objc_retainAutorelease(i8* %tmp71x) [[NUW:#[0-9]+]]
85 %tmp5 = tail call %18* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %18* (i8*, i8*)*)(i8* %tmp4, i8* %tmp)
87 %tmp7 = tail call i8* @objc_retain(i8* %tmp6) nounwind
91 %tmp11 = tail call %19* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %19* (i8*, i8*)*)(i8* %tmp10, i8* %tmp9)
94 %tmp14 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, %13*)*)(i8* %tmp13, i8* %tmp12, %13* bitcast (%12* @_unnamed_cfstring_386 to %13*))
98 %tmp18 = tail call i64 %tmp17(i8* %tmp15, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_count" to %1*))
114 %tmp28 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp7, i8* %tmp27)
115 %tmp29 = tail call i8* @objc_explicit_autorelease(i8* %tmp28) nounwind
117 tail call void @objc_release(i8* %tmp7) nounwind
119 %tmp32 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp31
    [all...]
  /external/okhttp/okio/okio/src/main/java/okio/
Buffer.java 241 Segment tail = writableSegment(1);
242 int maxToCopy = (int) Math.min(byteCount, Segment.SIZE - tail.limit);
243 int bytesRead = in.read(tail.data, tail.limit, maxToCopy);
248 tail.limit += bytesRead;
263 // Omit the tail if it's still writable.
264 Segment tail = head.prev;
265 if (tail.limit < Segment.SIZE && tail.owner) {
266 result -= tail.limit - tail.pos
    [all...]
  /external/llvm/test/CodeGen/ARM/
debug-info-d16-reg.ll 15 tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !19, metadata !MDExpression()), !dbg !26
16 tail call void @llvm.dbg.value(metadata double %val, i64 0, metadata !20, metadata !MDExpression()), !dbg !26
17 tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !21, metadata !MDExpression()), !dbg !26
19 %1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %val, i32 %0) nounwind, !dbg !27
25 tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !16, metadata !MDExpression()), !dbg !30
26 tail call void @llvm.dbg.value(metadata double %val, i64 0, metadata !17, metadata !MDExpression()), !dbg !30
27 tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !18, metadata !MDExpression()), !dbg !30
29 %1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %val, i32 %0) nounwind, !dbg !31
39 tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !22, metadata !MDExpression()), !dbg !34
40 tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !23, metadata !MDExpression()), !db (…)
    [all...]
debug-info-s16-reg.ll 17 tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !8, metadata !MDExpression()), !dbg !24
18 tail call void @llvm.dbg.value(metadata float %val, i64 0, metadata !10, metadata !MDExpression()), !dbg !25
19 tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !12, metadata !MDExpression()), !dbg !26
22 %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize, !dbg !27
30 tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !14, metadata !MDExpression()), !dbg !30
31 tail call void @llvm.dbg.value(metadata float %val, i64 0, metadata !15, metadata !MDExpression()), !dbg !31
32 tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !16, metadata !MDExpression()), !dbg !32
35 %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize, !dbg !33
41 tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !17, metadata !MDExpression()), !dbg !36
42 tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !18, metadata !MDExpression()), !db (…)
    [all...]
prefetch.ll 22 tail call void @llvm.prefetch( i8* %ptr, i32 1, i32 3, i32 1 )
23 tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 1 )
35 tail call void @llvm.prefetch( i8* %tmp, i32 0, i32 3, i32 1 )
50 tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3, i32 1 )
64 tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3, i32 1 )
77 tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 0 )
  /external/llvm/test/CodeGen/X86/
lsr-delayed-fold.ll 103 tail call void undef(i32 %add22)
104 tail call void undef(i32 %add28)
105 tail call void undef(i32 %add34)
106 tail call void undef(i32 %add40)
107 tail call void undef(i32 %add46)
108 tail call void undef(i32 %add52)
109 tail call void undef(i32 %add58)
110 tail call void undef(i32 %add64)
111 tail call void undef(i32 %add70)
112 tail call void undef(i32 %add82
    [all...]
adx-intrinsics.ll 12 %ret = tail call i8 @llvm.x86.addcarryx.u32(i8 %c, i32 %a, i32 %b, i8* %ptr)
24 %ret = tail call i8 @llvm.x86.addcarryx.u64(i8 %c, i64 %a, i64 %b, i8* %ptr)
37 %ret = tail call i8 @llvm.x86.addcarry.u32(i8 %c, i32 %a, i32 %b, i8* %ptr)
50 %ret = tail call i8 @llvm.x86.addcarry.u64(i8 %c, i64 %a, i64 %b, i8* %ptr)
62 %ret = tail call i8 @llvm.x86.subborrow.u32(i8 %c, i32 %a, i32 %b, i8* %ptr)
74 %ret = tail call i8 @llvm.x86.subborrow.u64(i8 %c, i64 %a, i64 %b, i8* %ptr)
fnabs.ll 12 %fabs = tail call float @fabsf(float %a) #1
23 %fabs = tail call float @fabsf(float %a) #1
33 %fabs = tail call <4 x float> @llvm.fabs.v4f32(< 4 x float> %a) #1
44 %fabs = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) #1
54 %fabs = tail call <8 x float> @llvm.fabs.v8f32(< 8 x float> %a) #1
65 %fabs = tail call <8 x float> @llvm.fabs.v8f32(<8 x float> %a) #1
sse4a.ll 7 tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
16 tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
25 %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
35 %2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
44 %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
53 %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
stack-folding-3dnow.ll 6 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
15 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
24 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
33 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
42 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
51 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
60 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
69 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
78 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
87 %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"(
    [all...]
  /external/llvm/test/CodeGen/AArch64/
arm64-vcvt_n.ll 7 %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 9)
15 %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 12)
23 %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %a, i32 18)
31 %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %a, i32 30)
35 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
40 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
fdiv-combine.ll 17 tail call void @foo_3f(float %div, float %div1, float %div2)
31 tail call void @foo_3d(double %div, double %div1, double %div2)
45 tail call void @foo_3_4xf(<4 x float> %div, <4 x float> %div1, <4 x float> %div2)
59 tail call void @foo_3_2xd(<2 x double> %div, <2 x double> %div1, <2 x double> %div2)
72 tail call void @foo_2f(float %div, float %div1)
83 tail call void @foo_2d(double %div, double %div1)
  /external/llvm/test/CodeGen/Hexagon/
BranchPredict.ll 19 %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
23 %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
41 %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
45 %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
62 %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
67 %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add1) nounwind
  /external/llvm/test/CodeGen/Hexagon/vect/
vect-shift-imm.ll 21 %0 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %x, i32 9)
22 %1 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %x, i32 8)
23 %2 = tail call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %x, i32 7)
24 %3 = tail call i64 @llvm.hexagon.S2.asl.i.vh(i64 %x, i32 6)
25 %4 = tail call i64 @llvm.hexagon.S2.asr.i.vh(i64 %x, i32 5)
26 %5 = tail call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %x, i32 4)
  /external/llvm/test/CodeGen/Mips/
2008-08-01-AsmInline.ll 11 %asmtmp = tail call %struct.DWstruct asm "multu $2,$3", "={lo},={hi},d,d"( i32 %u, i32 %v ) nounwind
31 %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
40 %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
49 %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
65 %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
68 %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
call-optimization.ll 37 tail call void @callee3()
38 tail call void @callee3()
45 tail call void @callee3()
83 %call = tail call double @ceil(double %d)
84 %call1 = tail call double @ceil(double %call)
86 %call2 = tail call double @ceil(double %call1)
  /external/llvm/test/Transforms/GVN/
2011-07-07-MatchIntrinsicExtract.ll 8 %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
20 %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
32 %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
44 %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
56 %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
68 %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
  /external/llvm/test/Transforms/IndVarSimplify/
dont-recompute.ll 33 ; CHECK: tail call void @func(i32 %add)
34 tail call void @func(i32 %add)
43 ; CHECK-NEXT: tail call void @func(i32 %add.lcssa)
44 tail call void @func(i32 %add)
57 ; CHECK: tail call void @func(i32 %add)
58 tail call void @func(i32 %add)
  /external/llvm/test/Transforms/InstCombine/
fabs.ll 11 %fabsf = tail call float @fabsf(float %mul)
21 %fabs = tail call double @fabs(double %mul)
31 %fabsl = tail call fp128 @fabsl(fp128 %mul)
47 %fabsf = tail call float @llvm.fabs.f32(float %mul)
57 %fabs = tail call double @llvm.fabs.f64(double %mul)
67 %fabsl = tail call fp128 @llvm.fabs.f128(fp128 %mul)
vec_demanded_elts.ll 16 %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
17 %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
18 %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
19 %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
20 %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
49 %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
54 %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
59 %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
64 %tmp3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %v33)
67 %tmp4 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %v41
    [all...]
  /external/llvm/test/Transforms/SimplifyCFG/R600/
cttz-ctlz.ll 8 ; SI-NEXT: [[CTLZ:%[A-Za-z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %A, i1 true)
16 %0 = tail call i64 @llvm.ctlz.i64(i64 %A, i1 true)
28 ; SI-NEXT: [[CTLZ:%[A-Za-z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %A, i1 true)
36 %0 = tail call i32 @llvm.ctlz.i32(i32 %A, i1 true)
48 ; SI-NEXT: [[CTLZ:%[A-Za-z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %A, i1 true)
56 %0 = tail call i16 @llvm.ctlz.i16(i16 %A, i1 true)
68 ; SI-NEXT: [[CTTZ:%[A-Za-z0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %A, i1 true)
76 %0 = tail call i64 @llvm.cttz.i64(i64 %A, i1 true)
88 ; SI-NEXT: [[CTTZ:%[A-Za-z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %A, i1 true)
96 %0 = tail call i32 @llvm.cttz.i32(i32 %A, i1 true
    [all...]
  /external/mesa3d/src/gallium/auxiliary/util/
u_ringbuffer.c 18 unsigned tail; member in struct:util_ringbuffer
61 return (ring->tail - (ring->head + 1)) & ring->mask;
135 ring_packet = &ring->buf[ring->tail];
149 packet[i] = ring->buf[ring->tail];
150 ring->tail++;
151 ring->tail &= ring->mask;
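
The u_ringbuffer hits show a head/tail ring buffer whose size is a power of two, so indices wrap with a mask: free space is (tail - (head + 1)) & mask, and the tail advances the same way as packets are read. A minimal C sketch of that index arithmetic, reduced to what the excerpt shows (the real code also handles locking and waiting, omitted here):

    /* Sketch of a power-of-two ring buffer index scheme: the buffer size
     * must be a power of two so that (index & mask) wraps it. The writer
     * bumps head, the reader bumps tail; the "+ 1" keeps head from ever
     * catching up to tail. */
    struct ringbuffer {
       unsigned *buf;
       unsigned mask;      /* buffer size - 1 */
       unsigned head;      /* next slot to write */
       unsigned tail;      /* next slot to read */
    };

    static unsigned ring_space(const struct ringbuffer *ring)
    {
       return (ring->tail - (ring->head + 1)) & ring->mask;
    }

    static unsigned ring_read_one(struct ringbuffer *ring)
    {
       unsigned value = ring->buf[ring->tail];
       ring->tail = (ring->tail + 1) & ring->mask;
       return value;
    }
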
