/external/llvm/test/CodeGen/CellSPU/
intrinsics_branch.ll
    10   target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
    17   declare <8 x i16> @llvm.spu.si.ceqh(<8 x i16>, <8 x i16>)
    18   declare <4 x i32> @llvm.spu.si.ceqi(<4 x i32>, i16)
    19   declare <8 x i16> @llvm.spu.si.ceqhi(<8 x i16>, i16)
    24   declare <8 x i16> @llvm.spu.si.cgth(<8 x i16>, <8 x i16> [all...]
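For context, a minimal call site for one of the SPU compare declarations above might look like this (function and operand names are illustrative, not from the truncated test):

    define <8 x i16> @ceqh_sketch(<8 x i16> %a, <8 x i16> %b) {
      ; compare-equal on halfwords: each lane is all-ones if equal, zero otherwise
      %r = call <8 x i16> @llvm.spu.si.ceqh(<8 x i16> %a, <8 x i16> %b)
      ret <8 x i16> %r
    }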
vec_const.ll
    22   target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128"
    66   define <8 x i16> @v8i16_constvec_1() {
    67   ret <8 x i16> < i16 32767, i16 32767, i16 32767, i16 32767,
    68   i16 32767, i16 32767, i16 32767, i16 32767 [all...]
select_bits.ll
    7    target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
    167  define <8 x i16> @selectbits_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
    168  %C = and <8 x i16> %rC, %rB
    169  %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1 [all...]
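The body is cut off; a sketch of the complete bit-select idiom such tests exercise, assuming the conventional (rC & rB) | (~rC & rA) completion (everything past the xor is a guess at the elided lines):

    define <8 x i16> @selectbits_v8i16_sketch(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
      %C = and <8 x i16> %rC, %rB          ; bits of rB where the mask rC is 1
      %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1,
                                i16 -1, i16 -1, i16 -1, i16 -1 >  ; ~rC
      %B = and <8 x i16> %A, %rA           ; bits of rA where the mask rC is 0
      %D = or <8 x i16> %C, %B             ; merged result
      ret <8 x i16> %D
    }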
stores.ll
    20   target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
    43   define void @store_v8i16_1(<8 x i16>* %a) nounwind {
    45   store <8 x i16> < i16 1, i16 2, i16 1, i16 1, i16 1, i16 2, i16 1, i16 1 >, <8 x i16>* % [all...]
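The store's pointer operand is cut off; given the %a parameter on line 43 it is presumably %a, so a self-contained form of the test would be (an assumption, not the file's verbatim text):

    define void @store_v8i16_sketch(<8 x i16>* %a) nounwind {
    entry:
      ; store a constant <8 x i16> vector through the pointer argument
      store <8 x i16> < i16 1, i16 2, i16 1, i16 1, i16 1, i16 2, i16 1, i16 1 >, <8 x i16>* %a
      ret void
    }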
/external/llvm/test/CodeGen/X86/
2009-09-21-NoSpillLoopCount.ll
    3    define void @dot(i16* nocapture %A, i32 %As, i16* nocapture %B, i32 %Bs, i16* nocapture %C, i32 %N) nounwind ssp {
    15   %2 = getelementptr i16* %A, i32 %1    ; <i16*> [#uses=1]
    16   %3 = load i16* %2, align 2            ; <i16> [#uses=1]
    17   %4 = sext i16 %3 to i32               ; <i32> [#uses=1]
    19   %6 = getelementptr i16* %B, i32 %5    ; <i16*> [#uses=1 [all...]
2008-04-16-CoalescerBug.ll
    14   %result.0163.us = trunc i32 %result.0.us to i16    ; <i16> [#uses=2]
    15   shl i16 %result.0163.us, 7                         ; <i16>:0 [#uses=1]
    16   %tmp106.us = and i16 %0, -1024                     ; <i16> [#uses=1]
    17   shl i16 %result.0163.us, 2                         ; <i16>:1 [#uses=1]
    18   %tmp109.us = and i16 %1, -32                       ; <i16> [#uses=1 [all...]
widen_cast-1.ll
    8    define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
    11   %src.addr = alloca <4 x i16>*                              ; <<4 x i16>**> [#uses=2]
    14   store <4 x i16>* %src, <4 x i16>** %src.addr
    28   %tmp4 = load <4 x i16>** %src.addr                         ; <<4 x i16>*> [#uses=1]
    29   %arrayidx5 = getelementptr <4 x i16>* %tmp4, i32 %tmp3     ; <<4 x i16>*> [#uses=1]
    30   %tmp6 = load <4 x i16>* %arrayidx5                         ; <<4 x i16>> [#uses=1 [all...]
2008-04-24-pblendw-fold-crash.ll
    4    target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
    10   %tmp126 = bitcast <2 x i64> %tmp122 to <8 x i16>    ; <<8 x i16>> [#uses=1]
    11   %tmp129 = call <8 x i16> @llvm.x86.sse41.pblendw( <8 x i16> zeroinitializer, <8 x i16> %tmp126, i32 2 ) nounwind    ; <<8 x i16>> [#uses=0]
    15   declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32) nounwind [all...]
2009-06-05-VZextByteShort.ll
    6    define <4 x i16> @a(i32* %x1) nounwind {
    9    %x = trunc i32 %x3 to i16
    10   %r = insertelement <4 x i16> zeroinitializer, i16 %x, i32 0
    11   ret <4 x i16> %r
    14   define <8 x i16> @b(i32* %x1) nounwind {
    17   %x = trunc i32 %x3 to i16
    18   %r = insertelement <8 x i16> zeroinitializer, i16 %x, i32 0
    19   ret <8 x i16> % [all...]
field-extract-use-trunc.ll
    29   define i16 @test5(i16 %f12) nounwind {
    30   %f11 = shl i16 %f12, 2
    31   %tmp7.25 = ashr i16 %f11, 8
    32   ret i16 %tmp7.25
    35   define i16 @test6(i16 %f12) nounwind {
    36   %f11 = shl i16 %f12, 8
    37   %tmp7.25 = ashr i16 %f11, 8
    38   ret i16 %tmp7.2 [all...]
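These tests exercise the shl-then-ashr idiom for extracting and sign-extending a bitfield. In test6, shifting an i16 left by 8 and arithmetic-shifting back by 8 is exactly a sign extension of the low byte, so an equivalent form (sketch only, illustrative function name) is:

    define i16 @test6_equivalent(i16 %f12) nounwind {
      ; shl 8 followed by ashr 8 on an i16 == sign-extend the low 8 bits
      %lo = trunc i16 %f12 to i8
      %r = sext i8 %lo to i16
      ret i16 %r
    }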
ins_subreg_coalesce-3.ll
    3    %struct.COMPOSITE = type { i8, i16, i16 }
    4    %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
    5    %struct.FILE_POS = type { i8, i8, i16, i32 }
    7    %struct.FONT_INFO = type { %struct.metrics*, i8*, i16*, %struct.COMPOSITE*, i32, %struct.rec*, %struct.rec*, i16, i16, i16*, i8*, i8*, i16* } [all...]
/external/llvm/test/Analysis/BasicAA/
intrinsics.ll
    3    target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
    8    ; CHECK: define <8 x i16> @test0(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
    10   ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    11   ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    12   ; CHECK-NEXT: %c = add <8 x i16> %a, %a
    13   define <8 x i16> @test0(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
    15   %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    16   call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16 [all...]
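The CHECK lines encode the point of this test: because %p and %q are noalias, the vst1 store through %q cannot clobber memory read by the vld1 from %p. The input presumably loads %p a second time and adds the two loads; the following sketch of the before/after shape is an assumption based on the CHECK pattern, not the file's elided text:

    ; assumed input shape:
    ;   %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    ;   call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    ;   %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    ;   %c = add <8 x i16> %a, %b
    ; after BasicAA proves no aliasing, the second vld1 is redundant:
    ;   %c = add <8 x i16> %a, %a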
/external/llvm/test/ExecutionEngine/
test-cast.ll
    11   zext i1 true to i16    ; <i16>:4 [#uses=0]
    12   zext i1 true to i16    ; <i16>:5 [#uses=0]
    23   sext i8 4 to i16       ; <i16>:16 [#uses=0]
    24   sext i8 4 to i16       ; <i16>:17 [#uses=0]
    33   zext i8 4 to i16       ; <i16>:26 [#uses=0 [all...]
/external/llvm/test/CodeGen/ARM/
vshiftins.ll
    12   define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
    15   %tmp1 = load <4 x i16>* %A
    16   %tmp2 = load <4 x i16>* %B
    17   %tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 > [all...]
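The listing cuts off before the intrinsic's declaration; reconstructed from the call signature on line 17 it would have this shape (the attribute list is an assumption):

    declare <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone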
2009-11-02-NegativeLane.ll
    2    target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
    11   %0 = load i16* undef, align 2
    12   %1 = insertelement <8 x i16> undef, i16 %0, i32 2
    13   %2 = insertelement <8 x i16> %1, i16 undef, i32 3
    14   %3 = mul <8 x i16> %2, %2
    15   %4 = extractelement <8 x i16> %3, i32 2
    16   store i16 %4, i16* undef, align [all...]
vshll.ll
    3    define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
    7    %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
    8    ret <8 x i16> %tmp2
    11   define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
    14   %tmp1 = load <4 x i16>* %A
    15   %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 > [all...]
fp16.ll
    3    target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
    6    @x = global i16 12902
    7    @y = global i16 0
    8    @z = common global i16 0
    14   %0 = load i16* @x, align 2
    15   %1 = load i16* @y, align 2
    16   %2 = tail call float @llvm.convert.from.fp16(i16 %0)
    19   %3 = tail call float @llvm.convert.from.fp16(i16 %1)
    23   %5 = tail call i16 @llvm.convert.to.fp16(float %4)
    26   store i16 %5, i16* @x, align [all...]
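The elided lines hide the float arithmetic between the two conversions. A self-contained half-precision round trip using the same intrinsics would look like the following sketch; the fadd stands in for whatever the elided %4 actually computes, and the function name is illustrative:

    declare float @llvm.convert.from.fp16(i16) nounwind readnone
    declare i16 @llvm.convert.to.fp16(float) nounwind readnone

    define i16 @fp16_add_sketch(i16 %a, i16 %b) nounwind {
    entry:
      ; widen both i16 half values to float, operate, then narrow back
      %fa = call float @llvm.convert.from.fp16(i16 %a)
      %fb = call float @llvm.convert.from.fp16(i16 %b)
      %sum = fadd float %fa, %fb
      %r = call i16 @llvm.convert.to.fp16(float %sum)
      ret i16 %r
    }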
/external/llvm/test/CodeGen/Thumb/
rev.ll
    23   %tmp1.upgrd.1 = trunc i32 %tmp1 to i16
    24   %tmp3 = trunc i32 %X to i16
    25   %tmp2 = and i16 %tmp1.upgrd.1, 255
    26   %tmp4 = shl i16 %tmp3, 8
    27   %tmp5 = or i16 %tmp2, %tmp4
    28   %tmp5.upgrd.2 = sext i16 %tmp5 to i32
    33   define i32 @test3(i16 zeroext %a) nounwind {
    37   %0 = tail call i16 @llvm.bswap.i16(i16 %a [all...]
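Lines 23-28 build a 16-bit byte swap by hand (mask the low byte of one value, shift the other left by 8, or them together), while test3 reaches the same result through the intrinsic. A minimal complete form of the intrinsic version; the declaration and the return path around the truncated call are assumptions:

    declare i16 @llvm.bswap.i16(i16) nounwind readnone

    define i32 @bswap16_sketch(i16 zeroext %a) nounwind {
    entry:
      %swapped = tail call i16 @llvm.bswap.i16(i16 %a)  ; swap the two bytes
      %r = zext i16 %swapped to i32
      ret i32 %r
    }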
/external/llvm/test/CodeGen/PowerPC/
vec_mul.ll
    11   define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
    12   %tmp = load <8 x i16>* %X             ; <<8 x i16>> [#uses=1]
    13   %tmp2 = load <8 x i16>* %Y            ; <<8 x i16>> [#uses=1]
    14   %tmp3 = mul <8 x i16> %tmp, %tmp2     ; <<8 x i16>> [#uses=1]
    15   ret <8 x i16> %tmp [all...]
Atomics-32.ll
    2    target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
    7    @ss = common global i16 0
    8    @us = common global i16 0
    20   %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
    21   %3 = atomicrmw add i16* %2, i16 1 monotonic
    22   %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
    23   %5 = atomicrmw add i16* %4, i16 1 monotoni [all...]
Atomics-64.ll
    9    target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
    14   @ss = common global i16 0
    15   @us = common global i16 0
    27   %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
    28   %3 = atomicrmw add i16* %2, i16 1 monotonic
    29   %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
    30   %5 = atomicrmw add i16* %4, i16 1 monotoni [all...]
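Both Atomics tests drive the same pattern: a monotonic atomicrmw fetch-and-add on an i16 global (the i16*-to-i8*-to-i16* double bitcast appears to be residue of how these tests were generated from C). Stripped to its essentials, with an illustrative function name:

    @ss = common global i16 0

    define i16 @fetch_add_sketch() nounwind {
    entry:
      ; atomically add 1 to @ss and return the previous value
      %old = atomicrmw add i16* @ss, i16 1 monotonic
      ret i16 %old
    }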
/external/llvm/test/CodeGen/MSP430/
2009-12-22-InlineAsm.ll
    4    target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-n8:16"
    9    define i16 @main() noreturn nounwind {
    11   %0 = tail call i8* asm "", "=r,0"(i8* getelementptr inbounds ([10 x i8]* @buf, i16 0, i16 0)) nounwind    ; <i8*> [#uses=1]
    12   %sub.ptr = getelementptr inbounds i8* %0, i16 1    ; <i8*> [#uses=1]
    13   %sub.ptr.lhs.cast = ptrtoint i8* %sub.ptr to i16   ; <i16> [#uses=1]
    14   %sub.ptr.sub = sub i16 %sub.ptr.lhs.cast, ptrtoint ([10 x i8]* @buf to i16)    ; <i16> [#uses=1 [all...]
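On MSP430 pointers are 16 bits wide (note the p:16:8:8 in the datalayout), so pointer differences land in i16 via the usual ptrtoint/sub idiom. In isolation (names are illustrative):

    define i16 @ptrdiff_sketch(i8* %p, i8* %q) nounwind {
    entry:
      ; pointer difference as integer math: (i16)p - (i16)q
      %pi = ptrtoint i8* %p to i16
      %qi = ptrtoint i8* %q to i16
      %d = sub i16 %pi, %qi
      ret i16 %d
    }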
indirectbr2.ll
    4    define internal i16 @foo(i16 %i) nounwind {
    6    %tmp1 = getelementptr inbounds [5 x i8*]* @C.0.2070, i16 0, i16 %i    ; <i8**> [#uses=1]
    15   %res.0 = phi i16 [ 385, %L5 ], [ 35, %entry ]      ; <i16> [#uses=1]
    19   %res.1 = phi i16 [ %res.0, %L4 ], [ 5, %entry ]    ; <i16> [#uses=1]
    23   %res.2 = phi i16 [ %res.1, %L3 ], [ 1, %entry ]    ; <i16> [#uses=1 [all...]
/external/llvm/test/Transforms/ScalarRepl/
2007-11-03-bigendian_apint.ll
    3    %struct.S = type { i16 }
    5    define zeroext i1 @f(i16 signext %b) {
    7    %b_addr = alloca i16    ; <i16*> [#uses=2]
    12   store i16 %b, i16* %b_addr
    13   %tmp1 = getelementptr %struct.S* %s, i32 0, i32 0    ; <i16*> [#uses=1]
    14   %tmp2 = load i16* %b_addr, align 2    ; <i16> [#uses=1]
    15   store i16 %tmp2, i16* %tmp1, align [all...]
/external/llvm/test/Analysis/ScalarEvolution/
fold.ll
    3    define i16 @test1(i8 %x) {
    5    %B = sext i12 %A to i16
    6    ; CHECK: zext i8 %x to i16
    7    ret i16 %B
    11   %A = zext i8 %x to i16
    12   %B = add i16 %A, 1025
    13   %C = trunc i16 %B to i8
    19   %A = zext i8 %x to i16
    20   %B = mul i16 %A, 1027
    21   %C = trunc i16 %B to i [all...]
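In test1 the elided line 4 presumably zero-extends %x into i12; that leaves bit 11 clear, so %A is provably non-negative and ScalarEvolution can fold the outer sext into the zext the CHECK line expects. A hedged reconstruction, with the assumed line marked:

    define i16 @test1_sketch(i8 %x) {
      %A = zext i8 %x to i12    ; assumed elided line: top bit of %A is known 0
      %B = sext i12 %A to i16   ; sext of a known non-negative value == zext
      ; SCEV therefore models %B as (zext i8 %x to i16)
      ret i16 %B
    }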