/external/llvm/test/CodeGen/X86/
shift-coalesce.ll
     8  define i64 @foo(i64 %x, i64* %X) {
     9    %tmp.1 = load i64* %X                     ; <i64> [#uses=1]
    10    %tmp.3 = trunc i64 %tmp.1 to i8           ; <i8> [#uses=1]
    11    %shift.upgrd.1 = zext i8 %tmp.3 to i64    ; <i64> [#uses=1]
    12    %tmp.4 = shl i64 %x, %shift.upgrd.1       ; <i64> [#uses=1] [all...]
subreg-to-reg-6.ll
     3  define i64 @foo() nounwind {
    18    %a = phi i64 [ 0, %bb ], [ 0, %entry ]
    19    tail call void asm "", "{cx}"(i64 %a) nounwind
    20    %t15 = and i64 %a, 4294967295
    21    ret i64 %t15
    24  define i64 @bar(i64 %t0) nounwind {
    25    call void asm "", "{cx}"(i64 0) nounwind
    26    %t1 = sub i64 0, %t0
    27    %t2 = and i64 %t1, 4294967295 [all...]
vec_shuffle-11.ll
     5    %tmp131 = call <2 x i64> @llvm.x86.sse2.psrl.dq( <2 x i64> < i64 -1, i64 -1 >, i32 96 )    ; <<2 x i64>> [#uses=1]
     6    %tmp137 = bitcast <2 x i64> %tmp131 to <4 x i32>    ; <<4 x i32>> [#uses=1]
     7    %tmp138 = and <4 x i32> %tmp137, bitcast (<2 x i64> < i64 -1, i64 -1 > to <4 x i32>)    ; <<4 x i32>> [#uses=1]
    11  declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) [all...]
atom-bypass-slow-division-64.ll
     5  define i64 @Test_get_quotient(i64 %a, i64 %b) nounwind {
    14    %result = sdiv i64 %a, %b
    15    ret i64 %result
    18  define i64 @Test_get_remainder(i64 %a, i64 %b) nounwind {
    27    %result = srem i64 %a, %b
    28    ret i64 %result [all...]
movbe.ll
     4  declare i64 @llvm.bswap.i64(i64) nounwind readnone
    22  define void @test3(i64* %x, i64 %y) nounwind {
    23    %bswap = call i64 @llvm.bswap.i64(i64 %y)
    24    store i64 %bswap, i64* %x, align [all...]
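A minimal sketch of the mirror-image pattern (a byte-swapped load, which the
MOVBE instruction can also match); @sketch_load is an illustrative name, not
taken from the file:

    declare i64 @llvm.bswap.i64(i64) nounwind readnone

    define i64 @sketch_load(i64* %x) nounwind {
      %v = load i64* %x, align 8              ; plain load of the raw bytes
      %r = call i64 @llvm.bswap.i64(i64 %v)   ; bswap foldable into a single movbe
      ret i64 %r
    }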
2008-04-28-CoalescerBug.ll
    36    %tmp13111 = load i64* null, align 8            ; <i64> [#uses=3]
    37    %tmp13116 = lshr i64 %tmp13111, 16             ; <i64> [#uses=1]
    38    %tmp1311613117 = trunc i64 %tmp13116 to i32    ; <i32> [#uses=1]
    40    %tmp13120 = lshr i64 %tmp13111, 32             ; <i64> [#uses=1]
    41    %tmp1312013121 = trunc i64 %tmp13120 to i32    ; <i32> [#uses=1]
    43    %tmp13124 = lshr i64 %tmp13111, 48             ; <i64> [#uses=1] [all...]
2008-07-16-CoalescerCrash.ll
     3  %struct.SV = type { i8*, i64, i64 }
     8  declare fastcc i64 @Perl_utf8n_to_uvuni(i8*, i64, i64*, i64) nounwind
    10  define fastcc i8* @Perl_pv_uni_display(%struct.SV* %dsv, i8* %spv, i64 %len, i64 %pvlim, i64 %flags) nounwind {
    15    tail call fastcc i64 @Perl_utf8n_to_uvuni( i8* null, i64 13, i64* null, i64 255 ) nounwind    ; <i64>:0 [#uses=1] [all...]
2008-08-19-SubAndFetch.ll
     3  @var = external global i64    ; <i64*> [#uses=1]
    10    atomicrmw sub i64* @var, i64 1 monotonic
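For contrast, a minimal sketch of the fetching variant, where the old value is
consumed and so cannot be lowered to a bare "lock sub"; @var2 and the function
name are illustrative, not from the file:

    @var2 = external global i64

    define i64 @sketch_sub_and_fetch() nounwind {
      ; the returned old value forces codegen to fetch it (e.g. via lock xadd)
      %old = atomicrmw sub i64* @var2, i64 1 monotonic
      ret i64 %old
    }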
2011-06-03-x87chain.ll
     3  define float @chainfail1(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
     5    %tmp1 = load i64* %a, align 8
     8    %conv = sitofp i64 %tmp1 to float
    13    %conv5 = sext i32 %div to i64
    14    store i64 %conv5, i64* %b, align 8
    18  define float @chainfail2(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
    21    store i64 0, i64* %b, align [all...]
shl-i64.ll
     3  ; Make sure that we don't generate an illegal i64 extract after LegalizeType.
     7  define void @test_cl(<4 x i64>* %dst, <4 x i64>* %src, i32 %idx) {
     9    %arrayidx = getelementptr inbounds <4 x i64>* %src, i32 %idx
    10    %0 = load <4 x i64>* %arrayidx, align 32
    11    %arrayidx1 = getelementptr inbounds <4 x i64>* %dst, i32 %idx
    12    %1 = load <4 x i64>* %arrayidx1, align 32
    13    %2 = extractelement <4 x i64> %1, i32 0
    14    %and = and i64 %2, 63
    15    %3 = insertelement <4 x i64> undef, i64 %and, i32 0 [all...]
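A minimal sketch of how such a test plausibly continues (assumed, since the
snippet is cut off at line 15): the masked count is splatted and used as a
fully variable vector shift, which type legalization must split without an
illegal i64 extract. @sketch_cl is an illustrative name:

    define <4 x i64> @sketch_cl(<4 x i64> %val, i64 %amt) {
      %and = and i64 %amt, 63                        ; clamp the count to [0, 63]
      %ins = insertelement <4 x i64> undef, i64 %and, i32 0
      %splat = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
      %shl = shl <4 x i64> %val, %splat              ; per-element variable shift
      ret <4 x i64> %shl
    }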
sse-align-10.ll
     3  define <2 x i64> @bar(<2 x i64>* %p) nounwind {
     4    %t = load <2 x i64>* %p, align 8
     5    ret <2 x i64> %t
/external/llvm/test/Transforms/InstCombine/
loadstore-alignment.ll
     2  target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
     4  @x = external global <2 x i64>, align 16
     5  @xx = external global [13 x <2 x i64>], align 16
     7  define <2 x i64> @static_hem() {
     8    %t = getelementptr <2 x i64>* @x, i32 7
     9    %tmp1 = load <2 x i64>* %t, align 1
    10    ret <2 x i64> %tmp1
    13  define <2 x i64> @hem(i32 %i) {
    14    %t = getelementptr <2 x i64>* @x, i32 %i
    15    %tmp1 = load <2 x i64>* %t, align [all...]
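A minimal sketch of the transformation this test presumably checks: InstCombine
may raise the "align 1" on these loads, because @x is declared align 16 and the
fixed offset in @static_hem (7 * 16 bytes) preserves 16-byte alignment. The
names below are illustrative, not from the file:

    @g = external global <2 x i64>, align 16

    define <2 x i64> @sketch() {
      %v = load <2 x i64>* @g, align 1    ; can be rewritten to align 16
      ret <2 x i64> %v
    }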
/external/llvm/test/CodeGen/AArch64/
dp-3source.ll
    11  define i64 @test_madd64(i64 %val0, i64 %val1, i64 %val2) {
    13    %mid = mul i64 %val1, %val2
    14    %res = add i64 %val0, %mid
    16    ret i64 %res
    27  define i64 @test_msub64(i64 %val0, i64 %val1, i64 %val2) [all...]
/external/llvm/test/CodeGen/ARM/
fixunsdfdi.ll
     4  define hidden i64 @__fixunsdfdi(double %x) nounwind readnone {
     6    %x14 = bitcast double %x to i64           ; <i64> [#uses=1]
    13    %u.in.mask = and i64 %x14, -4294967296    ; <i64> [#uses=1]
    14    %.ins = or i64 0, %u.in.mask              ; <i64> [#uses=1]
    15    %0 = bitcast i64 %.ins to double          ; <double> [#uses=1]
    19    %4 = zext i32 %3 to i64                   ; <i64> [#uses=1] [all...]
arguments-nosplit-i64.ll
     4  define i32 @f(i64 %z, i32 %a, i64 %b) {
     5    %tmp = call i32 @g(i64 %b)
     9  declare i32 @g(i64)
arguments3.ll
     4  define i64 @f(i32 %a, i128 %b) {
     5    %tmp = call i64 @g(i128 %b)
     6    ret i64 %tmp
     9  declare i64 @g(i128)
/external/llvm/test/CodeGen/PowerPC/
2008-03-24-AddressRegImm.ll
     5    %tmp2627 = ptrtoint i8* %rec to i64            ; <i64> [#uses=2]
     6    %tmp28 = and i64 %tmp2627, -16384              ; <i64> [#uses=2]
     7    %tmp2829 = inttoptr i64 %tmp28 to i8*          ; <i8*> [#uses=1]
     8    %tmp37 = getelementptr i8* %tmp2829, i64 42    ; <i8*> [#uses=1]
    10    %tmp4041 = zext i8 %tmp40 to i64               ; <i64> [#uses=1]
    11    %tmp42 = shl i64 %tmp4041, 8                   ; <i64> [#uses=1] [all...]
2004-11-30-shr-var-crash.ll
     5    %shift.upgrd.1 = zext i8 %shamt to i64    ; <i64> [#uses=1]
     6    %tr2 = ashr i64 1, %shift.upgrd.1         ; <i64> [#uses=0]
addc.ll
     4  define i64 @add_ll(i64 %a, i64 %b) nounwind {
     6    %tmp.2 = add i64 %b, %a    ; <i64> [#uses=1]
     7    ret i64 %tmp.2
    14  define i64 @add_l_5(i64 %a) nounwind {
    16    %tmp.1 = add i64 %a, 5     ; <i64> [#uses=1] [all...]
/external/llvm/test/CodeGen/Thumb2/
carry.ll
     3  define i64 @f1(i64 %a, i64 %b) {
     8    %tmp = sub i64 %a, %b
     9    ret i64 %tmp
    12  define i64 @f2(i64 %a, i64 %b) {
    19    %tmp1 = shl i64 %a, 1
    20    %tmp2 = sub i64 %tmp1, %b [all...]
/external/llvm/test/Transforms/ScalarRepl/
2009-02-05-LoadFCA.ll
     3  target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
     7  define i32 @f({ i64, i64 }) nounwind {
     9    %tmp = alloca { i64, i64 }, align 8    ; <{ i64, i64 }*> [#uses=2]
    10    store { i64, i64 } %0, { i64, i64 }* %tmp [all...]
/external/llvm/test/Bitcode/
ptest-new.ll
     3  define i32 @foo(<2 x i64> %bar) nounwind {
     5  ; CHECK: call i32 @llvm.x86.sse41.ptestc(<2 x i64>
     6    %res1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %bar, <2 x i64> %bar)
     7  ; CHECK: call i32 @llvm.x86.sse41.ptestz(<2 x i64>
     8    %res2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %bar, <2 x i64> %bar)
     9  ; CHECK: call i32 @llvm.x86.sse41.ptestnzc(<2 x i64>
    10    %res3 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %bar, <2 x i64> %bar) [all...]
ssse3_palignr.ll
     6    %0 = bitcast <4 x i32> %b to <2 x i64>    ; <<2 x i64>> [#uses=1]
     7    %1 = bitcast <4 x i32> %a to <2 x i64>    ; <<2 x i64>> [#uses=1]
     8    %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 15)    ; <<2 x i64>> [#uses=1]
     9    %3 = bitcast <2 x i64> %2 to <4 x i32>    ; <<4 x i32>> [#uses=1]
    15    %0 = bitcast <2 x i32> %b to <1 x i64>    ; <<1 x i64>> [#uses=1] [all...]
/external/llvm/test/Assembler/
2004-01-20-MaxLongLong.ll
     3  global i64 -9223372036854775808
/external/llvm/test/CodeGen/Mips/
mips64directive.ll
     3  @gl = global i64 1250999896321, align 8
     6  define i64 @foo1() nounwind readonly {
     8    %0 = load i64* @gl, align 8
     9    ret i64 %0