/external/llvm/test/CodeGen/PowerPC/
vec_splat.ll |
  48 define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
  49 %tmp.upgrd.1 = insertelement <8 x i16> undef, i16 %tmp, i32 0
  50 %tmp72 = insertelement <8 x i16> %tmp.upgrd.1, i16 %tmp, i32 1
  51 %tmp73 = insertelement <8 x i16> %tmp72, i16 %tmp, i32 2
  52 %tmp74 = insertelement <8 x i16> %tmp73, i16 %tmp, i32 3
  53 %tmp75 = insertelement <8 x i16> %tmp74, i16 %tmp, i32 4
  [all...]
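
The elided lines continue filling lanes 5 through 7 the same way. For context, a minimal sketch (function name hypothetical) of the same splat in its canonical form, one insertelement plus a zero-mask shufflevector:

  define <8 x i16> @splat_sketch(i16 %v) nounwind {
    ; put %v in lane 0, then broadcast lane 0 to all lanes
    %ins = insertelement <8 x i16> undef, i16 %v, i32 0
    %splat = shufflevector <8 x i16> %ins, <8 x i16> undef, <8 x i32> zeroinitializer
    ret <8 x i16> %splat
  }
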
illegal-element-type.ll |
  20 %V.0 = phi <8 x i16> [ %tmp42, %bb45 ], [ zeroinitializer, %bb30 ] ; <<8 x i16>> [#uses=1]
  21 %tmp42 = mul <8 x i16> zeroinitializer, %V.0 ; <<8 x i16>> [#uses=1]
|
/external/llvm/test/CodeGen/X86/ |
avx2-phaddsub.ll |
   5 define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
   6 %a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
   7 %b = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
   8 %r = add <16 x i16> %a, %b
   9 ret <16 x i16> %r
  14 define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y)
  [all...]
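
The two masks pick the even lanes into %a and the odd lanes into %b, grouped per 128-bit half to match vphaddw's lane structure, so the add computes pairwise horizontal sums. A hedged 128-bit sketch of the same idiom (function name hypothetical), where the grouping is easier to see:

  define <8 x i16> @phaddw_sketch(<8 x i16> %x, <8 x i16> %y) nounwind {
    %a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>  ; even lanes of x then y
    %b = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>  ; odd lanes of x then y
    ; lane i of the low half is x[2i] + x[2i+1]; the high half pairs up y
    %r = add <8 x i16> %a, %b
    ret <8 x i16> %r
  }
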
divide-by-constant.ll |
   2 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
   5 define zeroext i16 @test1(i16 zeroext %x) nounwind {
   7 %div = udiv i16 %x, 33
   8 ret i16 %div
  15 define zeroext i16 @test2(i8 signext %x, i16 zeroext %c) nounwind readnone ssp noredzone {
  17 %div = udiv i16 %c, 3
  18 ret i16 %div
  38 define signext i16 @test4(i16 signext %x) nounwind
  [all...]
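
These tests pin down the strength reduction of division by a constant. For test2's udiv i16 %c, 3, the usual lowering multiplies by a rounded-up reciprocal: ceil(2^17 / 3) = 43691 (0xAAAB), so c / 3 = (c * 43691) >> 17 for every 16-bit c. A hedged sketch of that expansion (name illustrative; the exact constant and shift are the target's choice):

  define zeroext i16 @udiv3_sketch(i16 zeroext %c) nounwind {
    %w = zext i16 %c to i32
    %m = mul i32 %w, 43691     ; 0xAAAB = ceil(2^17 / 3)
    %s = lshr i32 %m, 17       ; floor((c * 43691) / 2^17) = floor(c / 3)
    %r = trunc i32 %s to i16
    ret i16 %r
  }
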
Atomics-64.ll |
   3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
   8 @ss = common global i16 0
   9 @us = common global i16 0
  21 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  22 %3 = atomicrmw add i16* %2, i16 1 monotonic
  23 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  24 %5 = atomicrmw add i16* %4, i16 1 monotoni
  [all...]
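
Each match pairs a no-op bitcast round trip (front-end residue) with an atomicrmw on an i16 global. A minimal sketch (global and function names hypothetical) of the fetch-and-add being exercised:

  @s16 = common global i16 0

  define i16 @fetch_add_sketch() nounwind {
    ; atomically add 1 with monotonic ordering and return the old value,
    ; as for __sync_fetch_and_add on a 16-bit location
    %old = atomicrmw add i16* @s16, i16 1 monotonic
    ret i16 %old
  }
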
avx2-shift.ll |
  84 define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
  85 %s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2
  [all...]
2006-05-01-SchedCausingSpills.ll |
  20 %tmp98 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp75, <4 x i32> %tmp89 ) ; <<4 x i32>> [#uses=1]
  21 %tmp102 = bitcast <8 x i16> %tmp98 to <8 x i16> ; <<8 x i16>> [#uses=1]
  22 %tmp.upgrd.1 = shufflevector <8 x i16> %tmp102, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
  23 %tmp105 = shufflevector <8 x i16> %tmp.upgrd.1, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1
  [all...]
/external/llvm/test/Transforms/InstCombine/ |
load-cmp.ll |
   3 @G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
   4 i16 73, i16 82, i16 69, i16 68, i16 0
  [all...]
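
@G16 is the constant table these tests compare loads against. Since 69 occurs only at indices 2 and 7, InstCombine can fold a compare like icmp eq (load of G16[i]), 69 into a bit test on the index. A hedged sketch of one possible folded form (name hypothetical; 132 = 0b10000100, bits 2 and 7 set; %i is assumed in range 0..9):

  define i1 @cmp_sketch(i32 %i) nounwind {
    %shift = lshr i32 132, %i   ; shift the match bitmap down by the index
    %bit = and i32 %shift, 1
    %r = icmp ne i32 %bit, 0    ; true exactly when G16[%i] == 69
    ret i1 %r
  }
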
sext.ll |
   3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
  74 %s = trunc i32 %e to i16
  75 %n = sext i16 %s to i32
  84 define i16 @test9(i16 %t, i1 %cond) nounwind {
  88 %t2 = sext i16 %t to i32
  93 %W = trunc i32 %V to i16
  94 ret i16 %W
  99 ; CHECK-NEXT: phi i16
 100 ; CHECK-NEXT: ret i16
  [all...]
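
Lines 74-75 are the trunc-then-sext round trip this test targets: InstCombine canonicalizes a single-use sext(trunc %e) into a shl/ashr pair that re-fills the top bits from bit 15. A minimal sketch of the canonical form (function name hypothetical):

  define i32 @sext_trunc_sketch(i32 %e) nounwind {
    %shl = shl i32 %e, 16    ; move bit 15 of %e up to the sign bit
    %n = ashr i32 %shl, 16   ; arithmetic shift copies it back down
    ret i32 %n
  }
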
/external/llvm/test/CodeGen/ARM/ |
vbits.ll |
  12 define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  15 %tmp1 = load <4 x i16>* %A
  16 %tmp2 = load <4 x i16>* %B
  17 %tmp3 = and <4 x i16> %tmp1, %tmp2
  18 ret <4 x i16> %tmp3
  48 define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind
  [all...]
vcnt.ll |
  30 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
  32 ;CHECK: vclz.i16
  33 %tmp1 = load <4 x i16>* %A
  34 %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
  35 ret <4 x i16> %tmp2
  54 define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
  56 ;CHECK: vclz.i16
  [all...]
vneg.ll |
  11 define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
  14 %tmp1 = load <4 x i16>* %A
  15 %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
  16 ret <4 x i16> %tmp2
  43 define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
  46 %tmp1 = load <8 x i16>* %A
  47 %tmp2 = sub <8 x i16> zeroinitializer, %tmp1
  48 ret <8 x i16> %tmp
  [all...]
vcge.ll |
  13 define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  16 %tmp1 = load <4 x i16>* %A
  17 %tmp2 = load <4 x i16>* %B
  18 %tmp3 = icmp sge <4 x i16> %tmp1, %tmp2
  19 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  20 ret <4 x i16> %tmp4
  43 define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind
  [all...]
vpadd.ll |
  12 define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  14 ;CHECK: vpadd.i16
  15 %tmp1 = load <4 x i16>* %A
  16 %tmp2 = load <4 x i16>* %B
  17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  18 ret <4 x i16> %tmp
  [all...]
vrev.ll |
  11 define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
  14 %tmp1 = load <4 x i16>* %A
  15 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  16 ret <4 x i16> %tmp2
  43 define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
  46 %tmp1 = load <8 x i16>* %A
  47 %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4
  [all...]
2008-05-19-ScavengerAssert.ll |
   3 %struct.Decoders = type { i32**, i16***, i16****, i16***, i16**, i8**, i8** }
|
2009-07-22-SchedulerAssert.ll |
   3 %struct.cli_ac_alt = type { i8, i8*, i16, i16, %struct.cli_ac_alt* }
   5 %struct.cli_ac_patt = type { i16*, i16*, i16, i16, i8, i32, i32, i8*, i8*, i32, i16, i16, i16, i16, %struct.cli_ac_alt**, i8, i16, %struct.cli_ac_patt*, %struct.cli_ac_patt*
  [all...]
2010-06-29-SubregImpDefs.ll |
  10 %val4723 = load <8 x i16>* undef ; <<8 x i16>> [#uses=1]
  11 call void @PrintShortX(i8* getelementptr inbounds ([21 x i8]* @.str271, i32 0, i32 0), <8 x i16> %val4723, i32 0) nounwind
  15 declare void @PrintShortX(i8*, <8 x i16>, i32) nounwind
|
neon_shift.ll |
   4 define <4 x i16> @t1(<4 x i32> %a) nounwind {
   7 %x = tail call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %a, <4 x i32> <i32 -13, i32 -13, i32 -13, i32 -13>)
   8 ret <4 x i16> %x
  11 declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
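
In this older intrinsic scheme a negative shift vector means shift right, so the splat of -13 requests a rounding, saturating, narrowing right shift by 13 (vqrshrn.s32). A hedged per-lane sketch of the arithmetic, simplified in that the rounding add is done in 32 bits rather than a wider intermediate:

  define i16 @vqrshrn_lane_sketch(i32 %a) nounwind {
    %round = add i32 %a, 4096          ; +(1 << 12) rounds to nearest
    %shift = ashr i32 %round, 13
    ; clamp to the signed i16 range before narrowing
    %lo = icmp slt i32 %shift, -32768
    %c1 = select i1 %lo, i32 -32768, i32 %shift
    %hi = icmp sgt i32 %c1, 32767
    %c2 = select i1 %hi, i32 32767, i32 %c1
    %r = trunc i32 %c2 to i16
    ret i16 %r
  }
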
|
vmla.ll |
  14 define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
  16 ;CHECK: vmla.i16
  17 %tmp1 = load <4 x i16>* %A
  18 %tmp2 = load <4 x i16>* %B
  19 %tmp3 = load <4 x i16>* %C
  20 %tmp4 = mul <4 x i16> %tmp2, %tmp3
  21 %tmp5 = add <4 x i16> %tmp1, %tmp
  [all...]
vmls.ll |
  14 define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
  16 ;CHECK: vmls.i16
  17 %tmp1 = load <4 x i16>* %A
  18 %tmp2 = load <4 x i16>* %B
  19 %tmp3 = load <4 x i16>* %C
  20 %tmp4 = mul <4 x i16> %tmp2, %tmp3
  21 %tmp5 = sub <4 x i16> %tmp1, %tmp
  [all...]
vmov.ll |
   9 define <4 x i16> @v_movi16a() nounwind {
  11 ;CHECK: vmov.i16 d{{.*}}, #0x10
  12 ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
  15 define <4 x i16> @v_movi16b() nounwind {
  17 ;CHECK: vmov.i16 d{{.*}}, #0x1000
  18 ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096
  [all...]
/external/clang/test/CodeGen/ |
2009-06-14-anonymous-union-init.c |
   1 // RUN: %clang_cc1 -emit-llvm < %s | grep "zeroinitializer, i16 16877"
|
count-builtins.c |
  10 // CHECK: call i16 @llvm.ctlz.i16
  11 // CHECK: call i16 @llvm.cttz.i16
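
These CHECK lines match the 16-bit count-leading/trailing-zeros intrinsic calls the short count builtins compile to. A hedged IR sketch of what the matched calls look like in full (the trailing i1 is the is-zero-undef flag; false here is an assumption about what the front end emits):

  declare i16 @llvm.ctlz.i16(i16, i1)
  declare i16 @llvm.cttz.i16(i16, i1)

  define i16 @count_sketch(i16 %x) nounwind {
    %lz = call i16 @llvm.ctlz.i16(i16 %x, i1 false)
    %tz = call i16 @llvm.cttz.i16(i16 %x, i1 false)
    %r = add i16 %lz, %tz
    ret i16 %r
  }
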
|
/external/llvm/test/Analysis/ScalarEvolution/ |
2008-08-04-IVOverflow.ll |
  10 trunc i32 %i.0 to i16
  11 add i16 %0, %x16.0
  17 %x16.0 = phi i16 [ 0, %entry ], [ %1, %bb ]
  22 zext i16 %x16.0 to i32
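
The matched lines form an induction pattern whose i16 accumulator is allowed to wrap: each iteration adds the truncated i32 counter modulo 2^16, which is exactly the overflow behavior ScalarEvolution has to model here. A hedged reconstruction of the loop (block names, value names, and trip count assumed):

  define i32 @iv_wrap_sketch() nounwind {
  entry:
    br label %bb
  bb:
    %i.0 = phi i32 [ 0, %entry ], [ %i.next, %bb ]
    %x16.0 = phi i16 [ 0, %entry ], [ %sum, %bb ]
    %t = trunc i32 %i.0 to i16
    %sum = add i16 %x16.0, %t            ; wraps mod 2^16 by design
    %i.next = add i32 %i.0, 1
    %done = icmp eq i32 %i.next, 100000  ; enough iterations to overflow i16
    br i1 %done, label %exit, label %bb
  exit:
    %r = zext i16 %sum to i32
    ret i32 %r
  }
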
|